1 /* High-level loop manipulation functions.
2    Copyright (C) 2004-2018 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "tree.h"
25 #include "gimple.h"
26 #include "cfghooks.h"
27 #include "tree-pass.h"	/* ??? for TODO_update_ssa but this isn't a pass.  */
28 #include "ssa.h"
29 #include "gimple-pretty-print.h"
30 #include "fold-const.h"
31 #include "cfganal.h"
32 #include "gimplify.h"
33 #include "gimple-iterator.h"
34 #include "gimplify-me.h"
35 #include "tree-cfg.h"
36 #include "tree-ssa-loop-ivopts.h"
37 #include "tree-ssa-loop-manip.h"
38 #include "tree-ssa-loop-niter.h"
39 #include "tree-ssa-loop.h"
40 #include "tree-into-ssa.h"
41 #include "tree-ssa.h"
42 #include "cfgloop.h"
43 #include "tree-scalar-evolution.h"
44 #include "params.h"
45 #include "tree-inline.h"
46 
47 /* All bitmaps for rewriting into loop-closed SSA go on this obstack,
48    so that we can free them all at once.  */
49 static bitmap_obstack loop_renamer_obstack;
50 
51 /* Creates an induction variable with value BASE + STEP * iteration in LOOP.
52    It is expected that neither BASE nor STEP are shared with other expressions
53    (unless the sharing rules allow this).  Use VAR as a base var_decl for it
54    (if NULL, a new temporary will be created).  The increment will occur at
55    INCR_POS (after it if AFTER is true, before it otherwise).  INCR_POS and
56    AFTER can be computed using standard_iv_increment_position.  The ssa versions
57    of the variable before and after increment will be stored in VAR_BEFORE and
58    VAR_AFTER (unless they are NULL).  */
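
/* For illustration, a typical caller (see e.g. canonicalize_loop_ivs at the
   end of this file) materializes a counter running from 0 with step 1
   roughly like this:

     create_iv (build_int_cst (type, 0), build_int_cst (type, 1), NULL_TREE,
		loop, &incr_pos, insert_after, &iv_before, NULL);

   where INCR_POS and INSERT_AFTER may come from
   standard_iv_increment_position.  */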
59 
60 void
61 create_iv (tree base, tree step, tree var, struct loop *loop,
62 	   gimple_stmt_iterator *incr_pos, bool after,
63 	   tree *var_before, tree *var_after)
64 {
65   gassign *stmt;
66   gphi *phi;
67   tree initial, step1;
68   gimple_seq stmts;
69   tree vb, va;
70   enum tree_code incr_op = PLUS_EXPR;
71   edge pe = loop_preheader_edge (loop);
72 
73   if (var != NULL_TREE)
74     {
75       vb = make_ssa_name (var);
76       va = make_ssa_name (var);
77     }
78   else
79     {
80       vb = make_temp_ssa_name (TREE_TYPE (base), NULL, "ivtmp");
81       va = make_temp_ssa_name (TREE_TYPE (base), NULL, "ivtmp");
82     }
83   if (var_before)
84     *var_before = vb;
85   if (var_after)
86     *var_after = va;
87 
88   /* For easier readability of the created code, produce MINUS_EXPRs
89      when suitable.  */
90   if (TREE_CODE (step) == INTEGER_CST)
91     {
92       if (TYPE_UNSIGNED (TREE_TYPE (step)))
93 	{
94 	  step1 = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
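	  /* Use the negated constant only when it is smaller than STEP, so
	     that e.g. a decrement by 1 is emitted as "-= 1" rather than as
	     an addition of the corresponding all-ones constant.  */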
95 	  if (tree_int_cst_lt (step1, step))
96 	    {
97 	      incr_op = MINUS_EXPR;
98 	      step = step1;
99 	    }
100 	}
101       else
102 	{
103 	  bool ovf;
104 
105 	  if (!tree_expr_nonnegative_warnv_p (step, &ovf)
106 	      && may_negate_without_overflow_p (step))
107 	    {
108 	      incr_op = MINUS_EXPR;
109 	      step = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
110 	    }
111 	}
112     }
113   if (POINTER_TYPE_P (TREE_TYPE (base)))
114     {
115       if (TREE_CODE (base) == ADDR_EXPR)
116 	mark_addressable (TREE_OPERAND (base, 0));
117       step = convert_to_ptrofftype (step);
118       if (incr_op == MINUS_EXPR)
119 	step = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
120       incr_op = POINTER_PLUS_EXPR;
121     }
122   /* Gimplify the step if necessary.  We put the computations in front of the
123      loop (i.e. the step should be loop invariant).  */
124   step = force_gimple_operand (step, &stmts, true, NULL_TREE);
125   if (stmts)
126     gsi_insert_seq_on_edge_immediate (pe, stmts);
127 
128   stmt = gimple_build_assign (va, incr_op, vb, step);
129   if (after)
130     gsi_insert_after (incr_pos, stmt, GSI_NEW_STMT);
131   else
132     gsi_insert_before (incr_pos, stmt, GSI_NEW_STMT);
133 
134   initial = force_gimple_operand (base, &stmts, true, var);
135   if (stmts)
136     gsi_insert_seq_on_edge_immediate (pe, stmts);
137 
138   phi = create_phi_node (vb, loop->header);
139   add_phi_arg (phi, initial, loop_preheader_edge (loop), UNKNOWN_LOCATION);
140   add_phi_arg (phi, va, loop_latch_edge (loop), UNKNOWN_LOCATION);
141 }
142 
143 /* Return the innermost superloop of USE_LOOP whose immediate superloop is
144    also a superloop of DEF_LOOP, i.e. the "sibling" superloop of DEF_LOOP.  */
145 
146 static inline struct loop *
147 find_sibling_superloop (struct loop *use_loop, struct loop *def_loop)
148 {
149   unsigned ud = loop_depth (use_loop);
150   unsigned dd = loop_depth (def_loop);
151   gcc_assert (ud > 0 && dd > 0);
152   if (ud > dd)
153     use_loop = superloop_at_depth (use_loop, dd);
154   if (ud < dd)
155     def_loop = superloop_at_depth (def_loop, ud);
156   while (loop_outer (use_loop) != loop_outer (def_loop))
157     {
158       use_loop = loop_outer (use_loop);
159       def_loop = loop_outer (def_loop);
160       gcc_assert (use_loop && def_loop);
161     }
162   return use_loop;
163 }
164 
165 /* DEF_BB is a basic block containing a DEF that needs rewriting into
166    loop-closed SSA form.  USE_BLOCKS is the set of basic blocks containing
167    uses of DEF that "escape" from the loop containing DEF_BB (i.e. blocks in
168    USE_BLOCKS are dominated by DEF_BB but not in the loop father of DEF_BB).
169    LOOP_EXITS[I] is the set of all basic blocks that exit loop I.
170 
171    Compute the subset of LOOP_EXITS that exit the loop containing DEF_BB
172    or one of its loop fathers, in which DEF is live.  This set is returned
173    in the bitmap LIVE_EXITS.
174 
175    Instead of computing the complete livein set of the def, we use the loop
176    nesting tree as a form of poor man's structure analysis.  This greatly
177    speeds up the analysis, which is important because this function may be
178    called on all SSA names that need rewriting, one at a time.  */
179 
180 static void
181 compute_live_loop_exits (bitmap live_exits, bitmap use_blocks,
182 			 bitmap *loop_exits, basic_block def_bb)
183 {
184   unsigned i;
185   bitmap_iterator bi;
186   struct loop *def_loop = def_bb->loop_father;
187   unsigned def_loop_depth = loop_depth (def_loop);
188   bitmap def_loop_exits;
189 
190   /* Normally the work list size is bounded by the number of basic
191      blocks in the largest loop.  We don't know this number, but we
192      can be fairly sure that it will be relatively small.  */
193   auto_vec<basic_block> worklist (MAX (8, n_basic_blocks_for_fn (cfun) / 128));
194 
195   EXECUTE_IF_SET_IN_BITMAP (use_blocks, 0, i, bi)
196     {
197       basic_block use_bb = BASIC_BLOCK_FOR_FN (cfun, i);
198       struct loop *use_loop = use_bb->loop_father;
199       gcc_checking_assert (def_loop != use_loop
200 			   && ! flow_loop_nested_p (def_loop, use_loop));
201       if (! flow_loop_nested_p (use_loop, def_loop))
202 	use_bb = find_sibling_superloop (use_loop, def_loop)->header;
203       if (bitmap_set_bit (live_exits, use_bb->index))
204 	worklist.safe_push (use_bb);
205     }
206 
207   /* Iterate until the worklist is empty.  */
208   while (! worklist.is_empty ())
209     {
210       edge e;
211       edge_iterator ei;
212 
213       /* Pull a block off the worklist.  */
214       basic_block bb = worklist.pop ();
215 
216       /* Make sure we have at least enough room in the work list
217 	 for all predecessors of this block.  */
218       worklist.reserve (EDGE_COUNT (bb->preds));
219 
220       /* For each predecessor block.  */
221       FOR_EACH_EDGE (e, ei, bb->preds)
222 	{
223 	  basic_block pred = e->src;
224 	  struct loop *pred_loop = pred->loop_father;
225 	  unsigned pred_loop_depth = loop_depth (pred_loop);
226 	  bool pred_visited;
227 
228 	  /* We should have met DEF_BB along the way.  */
229 	  gcc_assert (pred != ENTRY_BLOCK_PTR_FOR_FN (cfun));
230 
231 	  if (pred_loop_depth >= def_loop_depth)
232 	    {
233 	      if (pred_loop_depth > def_loop_depth)
234 		pred_loop = superloop_at_depth (pred_loop, def_loop_depth);
235 	      /* If we've reached DEF_LOOP, our train ends here.  */
236 	      if (pred_loop == def_loop)
237 		continue;
238 	    }
239 	  else if (! flow_loop_nested_p (pred_loop, def_loop))
240 	    pred = find_sibling_superloop (pred_loop, def_loop)->header;
241 
242 	  /* Add PRED to the LIVEIN set.  PRED_VISITED is true if
243 	     we had already added PRED to LIVEIN before.  */
244 	  pred_visited = !bitmap_set_bit (live_exits, pred->index);
245 
246 	  /* If we have visited PRED before, don't add it to the worklist.
247 	     If BB dominates PRED, then we're probably looking at a loop.
248 	     We're only interested in looking up in the dominance tree
249 	     because DEF_BB dominates all the uses.  */
250 	  if (pred_visited || dominated_by_p (CDI_DOMINATORS, pred, bb))
251 	    continue;
252 
253 	  worklist.quick_push (pred);
254 	}
255     }
256 
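  /* LIVE_EXITS now contains all blocks found live by the walk above;
     restrict it to blocks that really are exit destinations of DEF_LOOP or
     of one of its superloops.  */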
257   def_loop_exits = BITMAP_ALLOC (&loop_renamer_obstack);
258   for (struct loop *loop = def_loop;
259        loop != current_loops->tree_root;
260        loop = loop_outer (loop))
261     bitmap_ior_into (def_loop_exits, loop_exits[loop->num]);
262   bitmap_and_into (live_exits, def_loop_exits);
263   BITMAP_FREE (def_loop_exits);
264 }
265 
266 /* Add a loop-closing PHI for VAR in basic block EXIT.  */
267 
268 static void
269 add_exit_phi (basic_block exit, tree var)
270 {
271   gphi *phi;
272   edge e;
273   edge_iterator ei;
274 
275   /* Check that at least one of the edges entering the EXIT block exits
276      the loop, or a superloop of that loop, that VAR is defined in.  */
277   if (flag_checking)
278     {
279       gimple *def_stmt = SSA_NAME_DEF_STMT (var);
280       basic_block def_bb = gimple_bb (def_stmt);
281       FOR_EACH_EDGE (e, ei, exit->preds)
282 	{
283 	  struct loop *aloop = find_common_loop (def_bb->loop_father,
284 						 e->src->loop_father);
285 	  if (!flow_bb_inside_loop_p (aloop, e->dest))
286 	    break;
287 	}
288       gcc_assert (e);
289     }
290 
291   phi = create_phi_node (NULL_TREE, exit);
292   create_new_def_for (var, phi, gimple_phi_result_ptr (phi));
293   FOR_EACH_EDGE (e, ei, exit->preds)
294     add_phi_arg (phi, var, e, UNKNOWN_LOCATION);
295 
296   if (dump_file && (dump_flags & TDF_DETAILS))
297     {
298       fprintf (dump_file, ";; Created LCSSA PHI: ");
299       print_gimple_stmt (dump_file, phi, 0, dump_flags);
300     }
301 }
302 
303 /* Add exit phis for VAR that is used in the blocks recorded in USE_BLOCKS.
304    Exits of the loops are stored in LOOP_EXITS.  */
305 
306 static void
307 add_exit_phis_var (tree var, bitmap use_blocks, bitmap *loop_exits)
308 {
309   unsigned index;
310   bitmap_iterator bi;
311   basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
312   bitmap live_exits = BITMAP_ALLOC (&loop_renamer_obstack);
313 
314   gcc_checking_assert (! bitmap_bit_p (use_blocks, def_bb->index));
315 
316   compute_live_loop_exits (live_exits, use_blocks, loop_exits, def_bb);
317 
318   EXECUTE_IF_SET_IN_BITMAP (live_exits, 0, index, bi)
319     {
320       add_exit_phi (BASIC_BLOCK_FOR_FN (cfun, index), var);
321     }
322 
323   BITMAP_FREE (live_exits);
324 }
325 
326 /* Add exit phis for the names marked in NAMES_TO_RENAME.
327    Exits of the loops are stored in LOOP_EXITS.  Sets of blocks where the ssa
328    names are used are stored in USE_BLOCKS.  */
329 
330 static void
331 add_exit_phis (bitmap names_to_rename, bitmap *use_blocks, bitmap *loop_exits)
332 {
333   unsigned i;
334   bitmap_iterator bi;
335 
336   EXECUTE_IF_SET_IN_BITMAP (names_to_rename, 0, i, bi)
337     {
338       add_exit_phis_var (ssa_name (i), use_blocks[i], loop_exits);
339     }
340 }
341 
342 /* Fill the array of bitmaps LOOP_EXITS with all loop exit edge targets.  */
343 
344 static void
345 get_loops_exits (bitmap *loop_exits)
346 {
347   struct loop *loop;
348   unsigned j;
349   edge e;
350 
351   FOR_EACH_LOOP (loop, 0)
352     {
353       vec<edge> exit_edges = get_loop_exit_edges (loop);
354       loop_exits[loop->num] = BITMAP_ALLOC (&loop_renamer_obstack);
355       FOR_EACH_VEC_ELT (exit_edges, j, e)
356         bitmap_set_bit (loop_exits[loop->num], e->dest->index);
357       exit_edges.release ();
358     }
359 }
360 
361 /* For USE in BB, if it is used outside of the loop it is defined in,
362    mark it for rewrite.  Record basic block BB where it is used
363    to USE_BLOCKS.  Record the ssa name index to NEED_PHIS bitmap.
364    Note that for USEs in phis, BB should be the src of the edge corresponding to
365    the use, rather than the bb containing the phi.  */
366 
367 static void
368 find_uses_to_rename_use (basic_block bb, tree use, bitmap *use_blocks,
369 			 bitmap need_phis)
370 {
371   unsigned ver;
372   basic_block def_bb;
373   struct loop *def_loop;
374 
375   if (TREE_CODE (use) != SSA_NAME)
376     return;
377 
378   ver = SSA_NAME_VERSION (use);
379   def_bb = gimple_bb (SSA_NAME_DEF_STMT (use));
380   if (!def_bb)
381     return;
382   def_loop = def_bb->loop_father;
383 
384   /* If the definition is not inside a loop, it is not interesting.  */
385   if (!loop_outer (def_loop))
386     return;
387 
388   /* If the use is not outside of the loop it is defined in, it is not
389      interesting.  */
390   if (flow_bb_inside_loop_p (def_loop, bb))
391     return;
392 
393   /* If we're seeing VER for the first time, we still have to allocate
394      a bitmap for its uses.  */
395   if (bitmap_set_bit (need_phis, ver))
396     use_blocks[ver] = BITMAP_ALLOC (&loop_renamer_obstack);
397   bitmap_set_bit (use_blocks[ver], bb->index);
398 }
399 
400 /* For uses matching USE_FLAGS in STMT, mark names that are used outside of the
401    loop they are defined in for rewrite.  Record the set of blocks in which the ssa
402    names are used to USE_BLOCKS, and the ssa names themselves to NEED_PHIS.  */
403 
404 static void
405 find_uses_to_rename_stmt (gimple *stmt, bitmap *use_blocks, bitmap need_phis,
406 			  int use_flags)
407 {
408   ssa_op_iter iter;
409   tree var;
410   basic_block bb = gimple_bb (stmt);
411 
412   if (is_gimple_debug (stmt))
413     return;
414 
415   /* FOR_EACH_SSA_TREE_OPERAND iterator does not allow SSA_OP_VIRTUAL_USES
416      only.  */
417   if (use_flags == SSA_OP_VIRTUAL_USES)
418     {
419       tree vuse = gimple_vuse (stmt);
420       if (vuse != NULL_TREE)
421 	find_uses_to_rename_use (bb, gimple_vuse (stmt), use_blocks, need_phis);
422     }
423   else
424     FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, use_flags)
425       find_uses_to_rename_use (bb, var, use_blocks, need_phis);
426 }
427 
428 /* Marks names matching USE_FLAGS that are used in BB and outside of the loop
429    they are defined in for rewrite.  Records the set of blocks in which the ssa
430    names are used to USE_BLOCKS.  Record the SSA names that will
431    need exit PHIs in NEED_PHIS.  */
432 
433 static void
434 find_uses_to_rename_bb (basic_block bb, bitmap *use_blocks, bitmap need_phis,
435 			int use_flags)
436 {
437   edge e;
438   edge_iterator ei;
439   bool do_virtuals = (use_flags & SSA_OP_VIRTUAL_USES) != 0;
440   bool do_nonvirtuals = (use_flags & SSA_OP_USE) != 0;
441 
442   FOR_EACH_EDGE (e, ei, bb->succs)
443     for (gphi_iterator bsi = gsi_start_phis (e->dest); !gsi_end_p (bsi);
444 	 gsi_next (&bsi))
445       {
446         gphi *phi = bsi.phi ();
447 	bool virtual_p = virtual_operand_p (gimple_phi_result (phi));
448 	if ((virtual_p && do_virtuals)
449 	    || (!virtual_p && do_nonvirtuals))
450 	  find_uses_to_rename_use (bb, PHI_ARG_DEF_FROM_EDGE (phi, e),
451 				   use_blocks, need_phis);
452       }
453 
454   for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
455        gsi_next (&bsi))
456     find_uses_to_rename_stmt (gsi_stmt (bsi), use_blocks, need_phis,
457 			      use_flags);
458 }
459 
460 /* Marks names matching USE_FLAGS that are used outside of the loop they are
461    defined in for rewrite.  Records the set of blocks in which the ssa names are
462    used to USE_BLOCKS.  Record the SSA names that will need exit PHIs in
463    NEED_PHIS.  If CHANGED_BBS is not NULL, scan only blocks in this set.  */
464 
465 static void
466 find_uses_to_rename (bitmap changed_bbs, bitmap *use_blocks, bitmap need_phis,
467 		     int use_flags)
468 {
469   basic_block bb;
470   unsigned index;
471   bitmap_iterator bi;
472 
473   if (changed_bbs)
474     EXECUTE_IF_SET_IN_BITMAP (changed_bbs, 0, index, bi)
475       {
476 	bb = BASIC_BLOCK_FOR_FN (cfun, index);
477 	if (bb)
478 	  find_uses_to_rename_bb (bb, use_blocks, need_phis, use_flags);
479       }
480   else
481     FOR_EACH_BB_FN (bb, cfun)
482       find_uses_to_rename_bb (bb, use_blocks, need_phis, use_flags);
483 }
484 
485 /* Mark uses of DEF that are used outside of the loop they are defined in for
486    rewrite.  Record the set of blocks in which the ssa names are used to
487    USE_BLOCKS.  Record the SSA names that will need exit PHIs in NEED_PHIS.  */
488 
489 static void
490 find_uses_to_rename_def (tree def, bitmap *use_blocks, bitmap need_phis)
491 {
492   gimple *use_stmt;
493   imm_use_iterator imm_iter;
494 
495   FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, def)
496     {
497       if (is_gimple_debug (use_stmt))
498 	continue;
499 
500       basic_block use_bb = gimple_bb (use_stmt);
501 
502       use_operand_p use_p;
503       FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
504 	{
505 	  if (gimple_code (use_stmt) == GIMPLE_PHI)
506 	    {
507 	      edge e = gimple_phi_arg_edge (as_a <gphi *> (use_stmt),
508 					    PHI_ARG_INDEX_FROM_USE (use_p));
509 	      use_bb = e->src;
510 	    }
511 	  find_uses_to_rename_use (use_bb, USE_FROM_PTR (use_p), use_blocks,
512 				   need_phis);
513 	}
514     }
515 }
516 
517 /* Marks names matching USE_FLAGS that are defined in LOOP and used outside of
518    it for rewrite.  Records the set of blocks in which the ssa names are used to
519    USE_BLOCKS.  Record the SSA names that will need exit PHIs in NEED_PHIS.  */
520 
521 static void
522 find_uses_to_rename_in_loop (struct loop *loop, bitmap *use_blocks,
523 			     bitmap need_phis, int use_flags)
524 {
525   bool do_virtuals = (use_flags & SSA_OP_VIRTUAL_USES) != 0;
526   bool do_nonvirtuals = (use_flags & SSA_OP_USE) != 0;
527   int def_flags = ((do_virtuals ? SSA_OP_VIRTUAL_DEFS : 0)
528 		   | (do_nonvirtuals ? SSA_OP_DEF : 0));
529 
530 
531   basic_block *bbs = get_loop_body (loop);
532 
533   for (unsigned int i = 0; i < loop->num_nodes; i++)
534     {
535       basic_block bb = bbs[i];
536 
537       for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
538 	   gsi_next (&bsi))
539 	{
540 	  gphi *phi = bsi.phi ();
541 	  tree res = gimple_phi_result (phi);
542 	  bool virtual_p = virtual_operand_p (res);
543 	  if ((virtual_p && do_virtuals)
544 	      || (!virtual_p && do_nonvirtuals))
545 	    find_uses_to_rename_def (res, use_blocks, need_phis);
546 	}
547 
548       for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
549 	   gsi_next (&bsi))
550 	{
551 	  gimple *stmt = gsi_stmt (bsi);
552 	  /* FOR_EACH_SSA_TREE_OPERAND iterator does not allow
553 	     SSA_OP_VIRTUAL_DEFS only.  */
554 	  if (def_flags == SSA_OP_VIRTUAL_DEFS)
555 	    {
556 	      tree vdef = gimple_vdef (stmt);
557 	      if (vdef != NULL)
558 		find_uses_to_rename_def (vdef, use_blocks, need_phis);
559 	    }
560 	  else
561 	    {
562 	      tree var;
563 	      ssa_op_iter iter;
564 	      FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, def_flags)
565 		find_uses_to_rename_def (var, use_blocks, need_phis);
566 	    }
567 	}
568     }
569 
570   XDELETEVEC (bbs);
571 }
572 
573 /* Rewrites the program into a loop closed ssa form -- i.e. inserts extra
574    phi nodes to ensure that no variable is used outside the loop it is
575    defined in.
576 
577    This strengthening of the basic ssa form has several advantages:
578 
579    1) Updating it during unrolling/peeling/versioning is trivial, since
580       we do not need to care about the uses outside of the loop.
581       The same applies to virtual operands which are also rewritten into
582       loop closed SSA form.  Note that virtual operands are always live
583       until function exit.
584    2) The behavior of all uses of an induction variable is the same.
585       Without this, you need to distinguish the case when the variable
586       is used outside of the loop it is defined in, for example
587 
588       for (i = 0; i < 100; i++)
589 	{
590 	  for (j = 0; j < 100; j++)
591 	    {
592 	      k = i + j;
593 	      use1 (k);
594 	    }
595 	  use2 (k);
596 	}
597 
598       Looking from the outer loop with the normal SSA form, the first use of k
599       is not well-behaved, while the second one is an induction variable with
600       base 99 and step 1.
601 
602       If LOOP is non-null, only rewrite uses that have defs in LOOP.  Otherwise,
603       if CHANGED_BBS is not NULL, we look for uses outside loops only in the
604       basic blocks in this set.
605 
606       USE_FLAGS allows us to specify whether we want virtual, non-virtual or
607       both variables rewritten.
608 
609       UPDATE_FLAG is used in the call to update_ssa.  See
610       TODO_update_ssa* for documentation.  */
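
/* For instance, in the example above the rewrite adds a phi node in the
   block the inner loop exits to, say k' = PHI <k>, and changes use2 to use
   k' instead of k; k' is then the well-behaved iv seen by the outer loop.  */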
611 
612 void
613 rewrite_into_loop_closed_ssa_1 (bitmap changed_bbs, unsigned update_flag,
614 				int use_flags, struct loop *loop)
615 {
616   bitmap *use_blocks;
617   bitmap names_to_rename;
618 
619   loops_state_set (LOOP_CLOSED_SSA);
620   if (number_of_loops (cfun) <= 1)
621     return;
622 
623   /* If the pass has caused the SSA form to be out-of-date, update it
624      now.  */
625   if (update_flag != 0)
626     update_ssa (update_flag);
627   else if (flag_checking)
628     verify_ssa (true, true);
629 
630   bitmap_obstack_initialize (&loop_renamer_obstack);
631 
632   names_to_rename = BITMAP_ALLOC (&loop_renamer_obstack);
633 
634   /* Uses of names to rename.  We don't have to initialize this array,
635      because we know that we will only have entries for the SSA names
636      in NAMES_TO_RENAME.  */
637   use_blocks = XNEWVEC (bitmap, num_ssa_names);
638 
639   if (loop != NULL)
640     {
641       gcc_assert (changed_bbs == NULL);
642       find_uses_to_rename_in_loop (loop, use_blocks, names_to_rename,
643 				   use_flags);
644     }
645   else
646     {
647       gcc_assert (loop == NULL);
648       find_uses_to_rename (changed_bbs, use_blocks, names_to_rename, use_flags);
649     }
650 
651   if (!bitmap_empty_p (names_to_rename))
652     {
653       /* An array of bitmaps where LOOP_EXITS[I] is the set of basic blocks
654 	 that are the destination of an edge exiting loop number I.  */
655       bitmap *loop_exits = XNEWVEC (bitmap, number_of_loops (cfun));
656       get_loops_exits (loop_exits);
657 
658       /* Add the PHI nodes on exits of the loops for the names we need to
659 	 rewrite.  */
660       add_exit_phis (names_to_rename, use_blocks, loop_exits);
661 
662       free (loop_exits);
663 
664       /* Fix up all the names found to be used outside their original
665 	 loops.  */
666       update_ssa (TODO_update_ssa);
667     }
668 
669   bitmap_obstack_release (&loop_renamer_obstack);
670   free (use_blocks);
671 }
672 
673 /* Rewrites the non-virtual defs and uses into a loop closed ssa form.  If
674    CHANGED_BBS is not NULL, we look for uses outside loops only in the basic
675    blocks in this set.  UPDATE_FLAG is used in the call to update_ssa.  See
676    TODO_update_ssa* for documentation.  */
677 
678 void
679 rewrite_into_loop_closed_ssa (bitmap changed_bbs, unsigned update_flag)
680 {
681   rewrite_into_loop_closed_ssa_1 (changed_bbs, update_flag, SSA_OP_USE, NULL);
682 }
683 
684 /* Rewrites virtual defs and uses with def in LOOP into loop closed ssa
685    form.  */
686 
687 void
688 rewrite_virtuals_into_loop_closed_ssa (struct loop *loop)
689 {
690   rewrite_into_loop_closed_ssa_1 (NULL, 0, SSA_OP_VIRTUAL_USES, loop);
691 }
692 
693 /* Check invariants of the loop closed ssa form for the def in DEF_BB.  */
694 
695 static void
696 check_loop_closed_ssa_def (basic_block def_bb, tree def)
697 {
698   use_operand_p use_p;
699   imm_use_iterator iterator;
700   FOR_EACH_IMM_USE_FAST (use_p, iterator, def)
701     {
702       if (is_gimple_debug (USE_STMT (use_p)))
703 	continue;
704 
705       basic_block use_bb = gimple_bb (USE_STMT (use_p));
706       if (is_a <gphi *> (USE_STMT (use_p)))
707 	use_bb = EDGE_PRED (use_bb, PHI_ARG_INDEX_FROM_USE (use_p))->src;
708 
709       gcc_assert (flow_bb_inside_loop_p (def_bb->loop_father, use_bb));
710     }
711 }
712 
713 /* Checks invariants of loop closed ssa form in BB.  */
714 
715 static void
716 check_loop_closed_ssa_bb (basic_block bb)
717 {
718   for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
719        gsi_next (&bsi))
720     {
721       gphi *phi = bsi.phi ();
722 
723       if (!virtual_operand_p (PHI_RESULT (phi)))
724 	check_loop_closed_ssa_def (bb, PHI_RESULT (phi));
725     }
726 
727   for (gimple_stmt_iterator bsi = gsi_start_nondebug_bb (bb); !gsi_end_p (bsi);
728        gsi_next_nondebug (&bsi))
729     {
730       ssa_op_iter iter;
731       tree var;
732       gimple *stmt = gsi_stmt (bsi);
733 
734       FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, SSA_OP_DEF)
735 	check_loop_closed_ssa_def (bb, var);
736     }
737 }
738 
739 /* Checks that invariants of the loop closed ssa form are preserved.
740    Call verify_ssa when VERIFY_SSA_P is true.  Note all loops are checked
741    if LOOP is NULL, otherwise, only LOOP is checked.  */
742 
743 DEBUG_FUNCTION void
744 verify_loop_closed_ssa (bool verify_ssa_p, struct loop *loop)
745 {
746   if (number_of_loops (cfun) <= 1)
747     return;
748 
749   if (verify_ssa_p)
750     verify_ssa (false, true);
751 
752   timevar_push (TV_VERIFY_LOOP_CLOSED);
753 
754   if (loop == NULL)
755     {
756       basic_block bb;
757 
758       FOR_EACH_BB_FN (bb, cfun)
759 	if (bb->loop_father && bb->loop_father->num > 0)
760 	  check_loop_closed_ssa_bb (bb);
761     }
762   else
763     {
764       basic_block *bbs = get_loop_body (loop);
765 
766       for (unsigned i = 0; i < loop->num_nodes; ++i)
767 	check_loop_closed_ssa_bb (bbs[i]);
768 
769       free (bbs);
770     }
771 
772   timevar_pop (TV_VERIFY_LOOP_CLOSED);
773 }
774 
775 /* Split loop exit edge EXIT.  Things are complicated a bit by the need to
776    preserve the loop closed ssa form.  The newly created block is returned.  */
777 
778 basic_block
779 split_loop_exit_edge (edge exit)
780 {
781   basic_block dest = exit->dest;
782   basic_block bb = split_edge (exit);
783   gphi *phi, *new_phi;
784   tree new_name, name;
785   use_operand_p op_p;
786   gphi_iterator psi;
787   source_location locus;
788 
789   for (psi = gsi_start_phis (dest); !gsi_end_p (psi); gsi_next (&psi))
790     {
791       phi = psi.phi ();
792       op_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (bb));
793       locus = gimple_phi_arg_location_from_edge (phi, single_succ_edge (bb));
794 
795       name = USE_FROM_PTR (op_p);
796 
797       /* If the argument of the PHI node is a constant, we do not need
798 	 to keep it inside loop.  */
799       if (TREE_CODE (name) != SSA_NAME)
800 	continue;
801 
802       /* Otherwise create an auxiliary phi node that will copy the value
803 	 of the SSA name out of the loop.  */
804       new_name = duplicate_ssa_name (name, NULL);
805       new_phi = create_phi_node (new_name, bb);
806       add_phi_arg (new_phi, name, exit, locus);
807       SET_USE (op_p, new_name);
808     }
809 
810   return bb;
811 }
812 
813 /* Returns the basic block in which statements should be emitted for induction
814    variables incremented at the end of the LOOP.  */
815 
816 basic_block
817 ip_end_pos (struct loop *loop)
818 {
819   return loop->latch;
820 }
821 
822 /* Returns the basic block in which statements should be emitted for induction
823    variables incremented just before exit condition of a LOOP.  */
824 
825 basic_block
826 ip_normal_pos (struct loop *loop)
827 {
828   gimple *last;
829   basic_block bb;
830   edge exit;
831 
832   if (!single_pred_p (loop->latch))
833     return NULL;
834 
835   bb = single_pred (loop->latch);
836   last = last_stmt (bb);
837   if (!last
838       || gimple_code (last) != GIMPLE_COND)
839     return NULL;
840 
841   exit = EDGE_SUCC (bb, 0);
842   if (exit->dest == loop->latch)
843     exit = EDGE_SUCC (bb, 1);
844 
845   if (flow_bb_inside_loop_p (loop, exit->dest))
846     return NULL;
847 
848   return bb;
849 }
850 
851 /* Stores the standard position for induction variable increment in LOOP
852    (just before the exit condition if it is available and latch block is empty,
853    end of the latch block otherwise) to BSI.  INSERT_AFTER is set to true if
854    the increment should be inserted after *BSI.  */
855 
856 void
857 standard_iv_increment_position (struct loop *loop, gimple_stmt_iterator *bsi,
858 				bool *insert_after)
859 {
860   basic_block bb = ip_normal_pos (loop), latch = ip_end_pos (loop);
861   gimple *last = last_stmt (latch);
862 
863   if (!bb
864       || (last && gimple_code (last) != GIMPLE_LABEL))
865     {
866       *bsi = gsi_last_bb (latch);
867       *insert_after = true;
868     }
869   else
870     {
871       *bsi = gsi_last_bb (bb);
872       *insert_after = false;
873     }
874 }
875 
876 /* Copies phi node arguments for duplicated blocks.  The index of the first
877    duplicated block is FIRST_NEW_BLOCK.  */
878 
879 static void
880 copy_phi_node_args (unsigned first_new_block)
881 {
882   unsigned i;
883 
884   for (i = first_new_block; i < (unsigned) last_basic_block_for_fn (cfun); i++)
885     BASIC_BLOCK_FOR_FN (cfun, i)->flags |= BB_DUPLICATED;
886 
887   for (i = first_new_block; i < (unsigned) last_basic_block_for_fn (cfun); i++)
888     add_phi_args_after_copy_bb (BASIC_BLOCK_FOR_FN (cfun, i));
889 
890   for (i = first_new_block; i < (unsigned) last_basic_block_for_fn (cfun); i++)
891     BASIC_BLOCK_FOR_FN (cfun, i)->flags &= ~BB_DUPLICATED;
892 }
893 
894 
895 /* The same as cfgloopmanip.c:duplicate_loop_to_header_edge, but also
896    updates the PHI nodes at start of the copied region.  In order to
897    achieve this, only loops whose exits all lead to the same location
898    are handled.
899 
900    Notice that we do not completely update the SSA web after
901    duplication.  The caller is responsible for calling update_ssa
902    after the loop has been duplicated.  */
903 
904 bool
905 gimple_duplicate_loop_to_header_edge (struct loop *loop, edge e,
906 				    unsigned int ndupl, sbitmap wont_exit,
907 				    edge orig, vec<edge> *to_remove,
908 				    int flags)
909 {
910   unsigned first_new_block;
911 
912   if (!loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
913     return false;
914   if (!loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS))
915     return false;
916 
917   first_new_block = last_basic_block_for_fn (cfun);
918   if (!duplicate_loop_to_header_edge (loop, e, ndupl, wont_exit,
919 				      orig, to_remove, flags))
920     return false;
921 
922   /* Readd the removed phi args for e.  */
923   flush_pending_stmts (e);
924 
925   /* Copy the phi node arguments.  */
926   copy_phi_node_args (first_new_block);
927 
928   scev_reset ();
929 
930   return true;
931 }
932 
933 /* Returns true if we can unroll LOOP FACTOR times.  Number
934    of iterations of the loop is returned in NITER.  */
935 
936 bool
937 can_unroll_loop_p (struct loop *loop, unsigned factor,
938 		   struct tree_niter_desc *niter)
939 {
940   edge exit;
941 
942   /* Check whether unrolling is possible.  We only want to unroll loops
943      for which we are able to determine the number of iterations.  We also
944      want to split the extra iterations of the loop from its end,
945      therefore we require that the loop has precisely one
946      exit.  */
947 
948   exit = single_dom_exit (loop);
949   if (!exit)
950     return false;
951 
952   if (!number_of_iterations_exit (loop, exit, niter, false)
953       || niter->cmp == ERROR_MARK
954       /* Scalar evolutions analysis might have copy propagated
955 	 the abnormal ssa names into these expressions, hence
956 	 emitting the computations based on them during loop
957 	 unrolling might create overlapping life ranges for
958 	 them, and failures in out-of-ssa.  */
959       || contains_abnormal_ssa_name_p (niter->may_be_zero)
960       || contains_abnormal_ssa_name_p (niter->control.base)
961       || contains_abnormal_ssa_name_p (niter->control.step)
962       || contains_abnormal_ssa_name_p (niter->bound))
963     return false;
964 
965   /* And of course, we must be able to duplicate the loop.  */
966   if (!can_duplicate_loop_p (loop))
967     return false;
968 
969   /* The final loop should be small enough.  */
970   if (tree_num_loop_insns (loop, &eni_size_weights) * factor
971       > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS))
972     return false;
973 
974   return true;
975 }
976 
977 /* Determines the conditions that control execution of LOOP unrolled FACTOR
978    times.  DESC is number of iterations of LOOP.  ENTER_COND is set to
979    condition that must be true if the main loop can be entered.
980    EXIT_BASE, EXIT_STEP, EXIT_CMP and EXIT_BOUND are set to values describing
981    how the exit from the unrolled loop should be controlled.  */
982 
983 static void
984 determine_exit_conditions (struct loop *loop, struct tree_niter_desc *desc,
985 			   unsigned factor, tree *enter_cond,
986 			   tree *exit_base, tree *exit_step,
987 			   enum tree_code *exit_cmp, tree *exit_bound)
988 {
989   gimple_seq stmts;
990   tree base = desc->control.base;
991   tree step = desc->control.step;
992   tree bound = desc->bound;
993   tree type = TREE_TYPE (step);
994   tree bigstep, delta;
995   tree min = lower_bound_in_type (type, type);
996   tree max = upper_bound_in_type (type, type);
997   enum tree_code cmp = desc->cmp;
998   tree cond = boolean_true_node, assum;
999 
1000   /* For pointers, do the arithmetic in the type of step.  */
1001   base = fold_convert (type, base);
1002   bound = fold_convert (type, bound);
1003 
1004   *enter_cond = boolean_false_node;
1005   *exit_base = NULL_TREE;
1006   *exit_step = NULL_TREE;
1007   *exit_cmp = ERROR_MARK;
1008   *exit_bound = NULL_TREE;
1009   gcc_assert (cmp != ERROR_MARK);
1010 
1011   /* We only need to be correct when we answer question
1012      "Do at least FACTOR more iterations remain?" in the unrolled loop.
1013      Thus, transforming BASE + STEP * i <> BOUND to
1014      BASE + STEP * i < BOUND is ok.  */
1015   if (cmp == NE_EXPR)
1016     {
1017       if (tree_int_cst_sign_bit (step))
1018 	cmp = GT_EXPR;
1019       else
1020 	cmp = LT_EXPR;
1021     }
1022   else if (cmp == LT_EXPR)
1023     {
1024       gcc_assert (!tree_int_cst_sign_bit (step));
1025     }
1026   else if (cmp == GT_EXPR)
1027     {
1028       gcc_assert (tree_int_cst_sign_bit (step));
1029     }
1030   else
1031     gcc_unreachable ();
1032 
1033   /* The main body of the loop may be entered iff:
1034 
1035      1) desc->may_be_zero is false.
1036      2) it is possible to check that there are at least FACTOR iterations
1037 	of the loop, i.e., BOUND - step * FACTOR does not overflow.
1038      3) # of iterations is at least FACTOR  */
1039 
1040   if (!integer_zerop (desc->may_be_zero))
1041     cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1042 			invert_truthvalue (desc->may_be_zero),
1043 			cond);
1044 
1045   bigstep = fold_build2 (MULT_EXPR, type, step,
1046 			 build_int_cst_type (type, factor));
1047   delta = fold_build2 (MINUS_EXPR, type, bigstep, step);
1048   if (cmp == LT_EXPR)
1049     assum = fold_build2 (GE_EXPR, boolean_type_node,
1050 			 bound,
1051 			 fold_build2 (PLUS_EXPR, type, min, delta));
1052   else
1053     assum = fold_build2 (LE_EXPR, boolean_type_node,
1054 			 bound,
1055 			 fold_build2 (PLUS_EXPR, type, max, delta));
1056   cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, assum, cond);
1057 
1058   bound = fold_build2 (MINUS_EXPR, type, bound, delta);
1059   assum = fold_build2 (cmp, boolean_type_node, base, bound);
1060   cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, assum, cond);
1061 
1062   cond = force_gimple_operand (unshare_expr (cond), &stmts, false, NULL_TREE);
1063   if (stmts)
1064     gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
1065   /* cond now may be a gimple comparison, which would be OK, but also any
1066      other gimple rhs (say a && b).  In this case we need to force it into an
1067      operand.  */
1068   if (!is_gimple_condexpr (cond))
1069     {
1070       cond = force_gimple_operand (cond, &stmts, true, NULL_TREE);
1071       if (stmts)
1072 	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
1073     }
1074   *enter_cond = cond;
1075 
1076   base = force_gimple_operand (unshare_expr (base), &stmts, true, NULL_TREE);
1077   if (stmts)
1078     gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
1079   bound = force_gimple_operand (unshare_expr (bound), &stmts, true, NULL_TREE);
1080   if (stmts)
1081     gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
1082 
1083   *exit_base = base;
1084   *exit_step = bigstep;
1085   *exit_cmp = cmp;
1086   *exit_bound = bound;
1087 }
1088 
1089 /* Scales the frequencies of all basic blocks in LOOP that are strictly
1090    dominated by BB by NUM/DEN.  */
1091 
1092 static void
1093 scale_dominated_blocks_in_loop (struct loop *loop, basic_block bb,
1094 				profile_count num, profile_count den)
1095 {
1096   basic_block son;
1097 
1098   if (!den.nonzero_p () && !(num == profile_count::zero ()))
1099     return;
1100 
1101   for (son = first_dom_son (CDI_DOMINATORS, bb);
1102        son;
1103        son = next_dom_son (CDI_DOMINATORS, son))
1104     {
1105       if (!flow_bb_inside_loop_p (loop, son))
1106 	continue;
1107       scale_bbs_frequencies_profile_count (&son, 1, num, den);
1108       scale_dominated_blocks_in_loop (loop, son, num, den);
1109     }
1110 }
1111 
1112 /* Return estimated niter for LOOP after unrolling by FACTOR times.  */
1113 
1114 gcov_type
1115 niter_for_unrolled_loop (struct loop *loop, unsigned factor)
1116 {
1117   gcc_assert (factor != 0);
1118   bool profile_p = false;
1119   gcov_type est_niter = expected_loop_iterations_unbounded (loop, &profile_p);
1120   /* Note that this is really CEIL (est_niter + 1, factor) - 1, where the
1121      "+ 1" converts latch iterations to loop iterations and the "- 1"
1122      converts back.  */
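  /* For example, EST_NITER == 9 (i.e. 10 loop iterations) unrolled by
     FACTOR == 4 gives 2 below, which is indeed CEIL (10, 4) - 1.  */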
1123   gcov_type new_est_niter = est_niter / factor;
1124 
1125   if (est_niter == -1)
1126     return -1;
1127 
1128   /* Without profile feedback, loops for which we do not know a better estimate
1129      are assumed to roll 10 times.  When we unroll such loop, it appears to
1130      roll too little, and it may even seem to be cold.  To avoid this, we
1131      ensure that the created loop appears to roll at least 5 times (but at
1132      most as many times as before unrolling).  Don't do adjustment if profile
1133      feedback is present.  */
1134   if (new_est_niter < 5 && !profile_p)
1135     {
1136       if (est_niter < 5)
1137 	new_est_niter = est_niter;
1138       else
1139 	new_est_niter = 5;
1140     }
1141 
1142   if (loop->any_upper_bound)
1143     {
1144       /* As above, this is really CEIL (upper_bound + 1, factor) - 1.  */
1145       widest_int bound = wi::udiv_floor (loop->nb_iterations_upper_bound,
1146 					 factor);
1147       if (wi::ltu_p (bound, new_est_niter))
1148 	new_est_niter = bound.to_uhwi ();
1149     }
1150 
1151   return new_est_niter;
1152 }
1153 
1154 /* Unroll LOOP FACTOR times.  DESC describes the number of iterations of LOOP.
1155    EXIT is the exit of the loop to which DESC corresponds.
1156 
1157    If N is the number of iterations of the loop and MAY_BE_ZERO is the condition
1158    under which the loop exits in the first iteration even if N != 0,
1159 
1160    while (1)
1161      {
1162        x = phi (init, next);
1163 
1164        pre;
1165        if (st)
1166          break;
1167        post;
1168      }
1169 
1170    becomes (with possibly the exit conditions formulated a bit differently,
1171    avoiding the need to create a new iv):
1172 
1173    if (MAY_BE_ZERO || N < FACTOR)
1174      goto rest;
1175 
1176    do
1177      {
1178        x = phi (init, next);
1179 
1180        pre;
1181        post;
1182        pre;
1183        post;
1184        ...
1185        pre;
1186        post;
1187        N -= FACTOR;
1188 
1189      } while (N >= FACTOR);
1190 
1191    rest:
1192      init' = phi (init, x);
1193 
1194    while (1)
1195      {
1196        x = phi (init', next);
1197 
1198        pre;
1199        if (st)
1200          break;
1201        post;
1202      }
1203 
1204    Before the loop is unrolled, TRANSFORM is called for it (only for the
1205    unrolled loop, but not for its versioned copy).  DATA is passed to
1206    TRANSFORM.  */
1207 
1208 /* Probability in % that the unrolled loop is entered.  Just a guess.  */
1209 #define PROB_UNROLLED_LOOP_ENTERED 90
1210 
1211 void
1212 tree_transform_and_unroll_loop (struct loop *loop, unsigned factor,
1213 				edge exit, struct tree_niter_desc *desc,
1214 				transform_callback transform,
1215 				void *data)
1216 {
1217   gcond *exit_if;
1218   tree ctr_before, ctr_after;
1219   tree enter_main_cond, exit_base, exit_step, exit_bound;
1220   enum tree_code exit_cmp;
1221   gphi *phi_old_loop, *phi_new_loop, *phi_rest;
1222   gphi_iterator psi_old_loop, psi_new_loop;
1223   tree init, next, new_init;
1224   struct loop *new_loop;
1225   basic_block rest, exit_bb;
1226   edge old_entry, new_entry, old_latch, precond_edge, new_exit;
1227   edge new_nonexit, e;
1228   gimple_stmt_iterator bsi;
1229   use_operand_p op;
1230   bool ok;
1231   unsigned i;
1232   profile_probability prob, prob_entry, scale_unrolled;
1233   profile_count freq_e, freq_h;
1234   gcov_type new_est_niter = niter_for_unrolled_loop (loop, factor);
1235   unsigned irr = loop_preheader_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP;
1236   auto_vec<edge> to_remove;
1237 
1238   determine_exit_conditions (loop, desc, factor,
1239 			     &enter_main_cond, &exit_base, &exit_step,
1240 			     &exit_cmp, &exit_bound);
1241 
1242   /* Let us assume that the unrolled loop is quite likely to be entered.  */
1243   if (integer_nonzerop (enter_main_cond))
1244     prob_entry = profile_probability::always ();
1245   else
1246     prob_entry = profile_probability::guessed_always ()
1247 			.apply_scale (PROB_UNROLLED_LOOP_ENTERED, 100);
1248 
1249   /* The values for scales should keep profile consistent, and somewhat close
1250      to correct.
1251 
1252      TODO: The current value of SCALE_REST makes it appear that the loop that
1253      is created by splitting the remaining iterations of the unrolled loop is
1254      executed the same number of times as the original loop, and with the same
1255      frequencies, which is obviously wrong.  This does not appear to cause
1256      problems, so we do not bother with fixing it for now.  To make the profile
1257      correct, we would need to change the probability of the exit edge of the
1258      loop, and recompute the distribution of frequencies in its body because
1259      of this change (scale the frequencies of blocks before and after the exit
1260      by appropriate factors).  */
1261   scale_unrolled = prob_entry;
1262 
1263   new_loop = loop_version (loop, enter_main_cond, NULL, prob_entry,
1264 			   prob_entry.invert (), scale_unrolled,
1265 			   profile_probability::guessed_always (),
1266 			   true);
1267   gcc_assert (new_loop != NULL);
1268   update_ssa (TODO_update_ssa);
1269 
1270   /* Prepare the cfg and update the phi nodes.  Move the loop exit to the
1271      loop latch (and make its condition dummy, for the moment).  */
1272   rest = loop_preheader_edge (new_loop)->src;
1273   precond_edge = single_pred_edge (rest);
1274   split_edge (loop_latch_edge (loop));
1275   exit_bb = single_pred (loop->latch);
1276 
1277   /* Since the exit edge will be removed, the frequency of all the blocks
1278      in the loop that are dominated by it must be scaled by
1279      1 / (1 - exit->probability).  */
1280   if (exit->probability.initialized_p ())
1281     scale_dominated_blocks_in_loop (loop, exit->src,
1282 				    /* We are scaling up here so probability
1283 				       does not fit.  */
1284 				    loop->header->count,
1285 				    loop->header->count
1286 				    - loop->header->count.apply_probability
1287 					 (exit->probability));
1288 
1289   bsi = gsi_last_bb (exit_bb);
1290   exit_if = gimple_build_cond (EQ_EXPR, integer_zero_node,
1291 			       integer_zero_node,
1292 			       NULL_TREE, NULL_TREE);
1293 
1294   gsi_insert_after (&bsi, exit_if, GSI_NEW_STMT);
1295   new_exit = make_edge (exit_bb, rest, EDGE_FALSE_VALUE | irr);
1296   rescan_loop_exit (new_exit, true, false);
1297 
1298   /* Set the probability of the new exit to that of the old one.  Fix
1299      the frequency of the latch block, by scaling it back by
1300      1 - exit->probability.  */
1301   new_exit->probability = exit->probability;
1302   new_nonexit = single_pred_edge (loop->latch);
1303   new_nonexit->probability = exit->probability.invert ();
1304   new_nonexit->flags = EDGE_TRUE_VALUE;
1305   if (new_nonexit->probability.initialized_p ())
1306     scale_bbs_frequencies (&loop->latch, 1, new_nonexit->probability);
1307 
1308   old_entry = loop_preheader_edge (loop);
1309   new_entry = loop_preheader_edge (new_loop);
1310   old_latch = loop_latch_edge (loop);
1311   for (psi_old_loop = gsi_start_phis (loop->header),
1312        psi_new_loop = gsi_start_phis (new_loop->header);
1313        !gsi_end_p (psi_old_loop);
1314        gsi_next (&psi_old_loop), gsi_next (&psi_new_loop))
1315     {
1316       phi_old_loop = psi_old_loop.phi ();
1317       phi_new_loop = psi_new_loop.phi ();
1318 
1319       init = PHI_ARG_DEF_FROM_EDGE (phi_old_loop, old_entry);
1320       op = PHI_ARG_DEF_PTR_FROM_EDGE (phi_new_loop, new_entry);
1321       gcc_assert (operand_equal_for_phi_arg_p (init, USE_FROM_PTR (op)));
1322       next = PHI_ARG_DEF_FROM_EDGE (phi_old_loop, old_latch);
1323 
1324       /* Prefer using original variable as a base for the new ssa name.
1325 	 This is necessary for virtual ops, and useful in order to avoid
1326 	 losing debug info for real ops.  */
1327       if (TREE_CODE (next) == SSA_NAME
1328 	  && useless_type_conversion_p (TREE_TYPE (next),
1329 					TREE_TYPE (init)))
1330 	new_init = copy_ssa_name (next);
1331       else if (TREE_CODE (init) == SSA_NAME
1332 	       && useless_type_conversion_p (TREE_TYPE (init),
1333 					     TREE_TYPE (next)))
1334 	new_init = copy_ssa_name (init);
1335       else if (useless_type_conversion_p (TREE_TYPE (next), TREE_TYPE (init)))
1336 	new_init = make_temp_ssa_name (TREE_TYPE (next), NULL, "unrinittmp");
1337       else
1338 	new_init = make_temp_ssa_name (TREE_TYPE (init), NULL, "unrinittmp");
1339 
1340       phi_rest = create_phi_node (new_init, rest);
1341 
1342       add_phi_arg (phi_rest, init, precond_edge, UNKNOWN_LOCATION);
1343       add_phi_arg (phi_rest, next, new_exit, UNKNOWN_LOCATION);
1344       SET_USE (op, new_init);
1345     }
1346 
1347   remove_path (exit);
1348 
1349   /* Transform the loop.  */
1350   if (transform)
1351     (*transform) (loop, data);
1352 
1353   /* Unroll the loop and remove the exits in all iterations except for the
1354      last one.  */
1355   auto_sbitmap wont_exit (factor);
1356   bitmap_ones (wont_exit);
1357   bitmap_clear_bit (wont_exit, factor - 1);
1358 
1359   ok = gimple_duplicate_loop_to_header_edge
1360 	  (loop, loop_latch_edge (loop), factor - 1,
1361 	   wont_exit, new_exit, &to_remove, DLTHE_FLAG_UPDATE_FREQ);
1362   gcc_assert (ok);
1363 
1364   FOR_EACH_VEC_ELT (to_remove, i, e)
1365     {
1366       ok = remove_path (e);
1367       gcc_assert (ok);
1368     }
1369   update_ssa (TODO_update_ssa);
1370 
1371   /* Ensure that the frequencies in the loop match the new estimated
1372      number of iterations, and change the probability of the new
1373      exit edge.  */
1374 
1375   freq_h = loop->header->count;
1376   freq_e = (loop_preheader_edge (loop))->count ();
1377   if (freq_h.nonzero_p ())
1378     {
1379       /* Avoid dropping loop body profile counter to 0 because of zero count
1380 	 in loop's preheader.  */
1381       if (freq_h.nonzero_p () && !(freq_e == profile_count::zero ()))
1382         freq_e = freq_e.force_nonzero ();
1383       scale_loop_frequencies (loop, freq_e.probability_in (freq_h));
1384     }
1385 
1386   exit_bb = single_pred (loop->latch);
1387   new_exit = find_edge (exit_bb, rest);
1388   new_exit->probability = profile_probability::always ()
1389 				.apply_scale (1, new_est_niter + 1);
1390 
1391   rest->count += new_exit->count ();
1392 
1393   new_nonexit = single_pred_edge (loop->latch);
1394   prob = new_nonexit->probability;
1395   new_nonexit->probability = new_exit->probability.invert ();
1396   prob = new_nonexit->probability / prob;
1397   if (prob.initialized_p ())
1398     scale_bbs_frequencies (&loop->latch, 1, prob);
1399 
1400   /* Finally create the new counter for number of iterations and add the new
1401      exit instruction.  */
1402   bsi = gsi_last_nondebug_bb (exit_bb);
1403   exit_if = as_a <gcond *> (gsi_stmt (bsi));
1404   create_iv (exit_base, exit_step, NULL_TREE, loop,
1405 	     &bsi, false, &ctr_before, &ctr_after);
1406   gimple_cond_set_code (exit_if, exit_cmp);
1407   gimple_cond_set_lhs (exit_if, ctr_after);
1408   gimple_cond_set_rhs (exit_if, exit_bound);
1409   update_stmt (exit_if);
1410 
1411   checking_verify_flow_info ();
1412   checking_verify_loop_structure ();
1413   checking_verify_loop_closed_ssa (true, loop);
1414   checking_verify_loop_closed_ssa (true, new_loop);
1415 }
1416 
1417 /* Wrapper over tree_transform_and_unroll_loop for the case when we do not
1418    want to transform the loop before unrolling.  The meaning
1419    of the arguments is the same as for tree_transform_and_unroll_loop.  */
1420 
1421 void
1422 tree_unroll_loop (struct loop *loop, unsigned factor,
1423 		  edge exit, struct tree_niter_desc *desc)
1424 {
1425   tree_transform_and_unroll_loop (loop, factor, exit, desc,
1426 				  NULL, NULL);
1427 }
1428 
1429 /* Rewrite the phi node at position PSI in terms of the main
1430    induction variable MAIN_IV and insert the generated code at GSI.  */
1431 
1432 static void
1433 rewrite_phi_with_iv (loop_p loop,
1434 		     gphi_iterator *psi,
1435 		     gimple_stmt_iterator *gsi,
1436 		     tree main_iv)
1437 {
1438   affine_iv iv;
1439   gassign *stmt;
1440   gphi *phi = psi->phi ();
1441   tree atype, mtype, val, res = PHI_RESULT (phi);
1442 
1443   if (virtual_operand_p (res) || res == main_iv)
1444     {
1445       gsi_next (psi);
1446       return;
1447     }
1448 
1449   if (!simple_iv (loop, loop, res, &iv, true))
1450     {
1451       gsi_next (psi);
1452       return;
1453     }
1454 
1455   remove_phi_node (psi, false);
1456 
1457   atype = TREE_TYPE (res);
1458   mtype = POINTER_TYPE_P (atype) ? sizetype : atype;
1459   val = fold_build2 (MULT_EXPR, mtype, unshare_expr (iv.step),
1460 		     fold_convert (mtype, main_iv));
1461   val = fold_build2 (POINTER_TYPE_P (atype)
1462 		     ? POINTER_PLUS_EXPR : PLUS_EXPR,
1463 		     atype, unshare_expr (iv.base), val);
1464   val = force_gimple_operand_gsi (gsi, val, false, NULL_TREE, true,
1465 				  GSI_SAME_STMT);
1466   stmt = gimple_build_assign (res, val);
1467   gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1468 }
1469 
1470 /* Rewrite all the phi nodes of LOOP in terms of the main induction
1471    variable MAIN_IV.  */
1472 
1473 static void
1474 rewrite_all_phi_nodes_with_iv (loop_p loop, tree main_iv)
1475 {
1476   unsigned i;
1477   basic_block *bbs = get_loop_body_in_dom_order (loop);
1478   gphi_iterator psi;
1479 
1480   for (i = 0; i < loop->num_nodes; i++)
1481     {
1482       basic_block bb = bbs[i];
1483       gimple_stmt_iterator gsi = gsi_after_labels (bb);
1484 
1485       if (bb->loop_father != loop)
1486 	continue;
1487 
1488       for (psi = gsi_start_phis (bb); !gsi_end_p (psi); )
1489 	rewrite_phi_with_iv (loop, &psi, &gsi, main_iv);
1490     }
1491 
1492   free (bbs);
1493 }
1494 
1495 /* Bases all the induction variables in LOOP on a single induction variable
1496    (with base 0 and step 1), whose final value is compared with *NIT.  When the
1497    IV type precision has to be larger than *NIT type precision, *NIT is
1498    converted to the larger type, the conversion code is inserted before the
1499    loop, and *NIT is updated to the new definition.  When BUMP_IN_LATCH is true,
1500    the induction variable is incremented in the loop latch, otherwise it is
1501    incremented in the loop header.  Return the induction variable that was
1502    created.  */
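
/* As a rough sketch of the effect: the induction variables of LOOP end up
   expressed in terms of a single new iv running from 0 with step 1, and the
   condition controlling the single dominating exit is rewritten to compare
   that iv against *NIT with LT_EXPR, the exit being taken when the
   comparison is false.  */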
1503 
1504 tree
1505 canonicalize_loop_ivs (struct loop *loop, tree *nit, bool bump_in_latch)
1506 {
1507   unsigned precision = TYPE_PRECISION (TREE_TYPE (*nit));
1508   unsigned original_precision = precision;
1509   tree type, var_before;
1510   gimple_stmt_iterator gsi;
1511   gphi_iterator psi;
1512   gcond *stmt;
1513   edge exit = single_dom_exit (loop);
1514   gimple_seq stmts;
1515   bool unsigned_p = false;
1516 
1517   for (psi = gsi_start_phis (loop->header);
1518        !gsi_end_p (psi); gsi_next (&psi))
1519     {
1520       gphi *phi = psi.phi ();
1521       tree res = PHI_RESULT (phi);
1522       bool uns;
1523 
1524       type = TREE_TYPE (res);
1525       if (virtual_operand_p (res)
1526 	  || (!INTEGRAL_TYPE_P (type)
1527 	      && !POINTER_TYPE_P (type))
1528 	  || TYPE_PRECISION (type) < precision)
1529 	continue;
1530 
1531       uns = POINTER_TYPE_P (type) | TYPE_UNSIGNED (type);
1532 
1533       if (TYPE_PRECISION (type) > precision)
1534 	unsigned_p = uns;
1535       else
1536 	unsigned_p |= uns;
1537 
1538       precision = TYPE_PRECISION (type);
1539     }
1540 
1541   scalar_int_mode mode = smallest_int_mode_for_size (precision);
1542   precision = GET_MODE_PRECISION (mode);
1543   type = build_nonstandard_integer_type (precision, unsigned_p);
1544 
1545   if (original_precision != precision)
1546     {
1547       *nit = fold_convert (type, *nit);
1548       *nit = force_gimple_operand (*nit, &stmts, true, NULL_TREE);
1549       if (stmts)
1550 	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
1551     }
1552 
1553   if (bump_in_latch)
1554     gsi = gsi_last_bb (loop->latch);
1555   else
1556     gsi = gsi_last_nondebug_bb (loop->header);
1557   create_iv (build_int_cst_type (type, 0), build_int_cst (type, 1), NULL_TREE,
1558 	     loop, &gsi, bump_in_latch, &var_before, NULL);
1559 
1560   rewrite_all_phi_nodes_with_iv (loop, var_before);
1561 
1562   stmt = as_a <gcond *> (last_stmt (exit->src));
1563   /* Make the loop exit if the control condition is not satisfied.  */
1564   if (exit->flags & EDGE_TRUE_VALUE)
1565     {
1566       edge te, fe;
1567 
1568       extract_true_false_edges_from_block (exit->src, &te, &fe);
1569       te->flags = EDGE_FALSE_VALUE;
1570       fe->flags = EDGE_TRUE_VALUE;
1571     }
1572   gimple_cond_set_code (stmt, LT_EXPR);
1573   gimple_cond_set_lhs (stmt, var_before);
1574   gimple_cond_set_rhs (stmt, *nit);
1575   update_stmt (stmt);
1576 
1577   return var_before;
1578 }
1579