/* High-level loop manipulation functions.
   Copyright (C) 2004-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"	/* ??? for TODO_update_ssa but this isn't a pass.  */
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "tree-inline.h"

/* All bitmaps for rewriting into loop-closed SSA go on this obstack,
   so that we can free them all at once.  */
static bitmap_obstack loop_renamer_obstack;

/* Creates an induction variable with value BASE + STEP * iteration in LOOP.
   It is expected that neither BASE nor STEP are shared with other expressions
   (unless the sharing rules allow this).  Use VAR as a base var_decl for it
   (if NULL, a new temporary will be created).  The increment will occur at
   INCR_POS (after it if AFTER is true, before it otherwise).  INCR_POS and
   AFTER can be computed using standard_iv_increment_position.  The SSA
   versions of the variable before and after increment will be stored in
   VAR_BEFORE and VAR_AFTER (unless they are NULL).  */
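/* For example (a sketch; TYPE and the variable names are illustrative),
   the canonical induction variable 0, 1, 2, ... of LOOP can be created
   with

     gimple_stmt_iterator incr_pos;
     bool after;
     standard_iv_increment_position (loop, &incr_pos, &after);
     create_iv (build_int_cst (type, 0), build_int_cst (type, 1),
		NULL_TREE, loop, &incr_pos, after, &var_before, NULL);

   leaving in VAR_BEFORE the result of the created header PHI

     var_before = PHI <0(preheader), var_after(latch)>

   Compare canonicalize_loop_ivs below.  */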

void
create_iv (tree base, tree step, tree var, class loop *loop,
	   gimple_stmt_iterator *incr_pos, bool after,
	   tree *var_before, tree *var_after)
{
  gassign *stmt;
  gphi *phi;
  tree initial, step1;
  gimple_seq stmts;
  tree vb, va;
  enum tree_code incr_op = PLUS_EXPR;
  edge pe = loop_preheader_edge (loop);

  if (var != NULL_TREE)
    {
      vb = make_ssa_name (var);
      va = make_ssa_name (var);
    }
  else
    {
      vb = make_temp_ssa_name (TREE_TYPE (base), NULL, "ivtmp");
      va = make_temp_ssa_name (TREE_TYPE (base), NULL, "ivtmp");
    }
  if (var_before)
    *var_before = vb;
  if (var_after)
    *var_after = va;

  /* For easier readability of the created code, produce MINUS_EXPRs
     when suitable.  */
  if (TREE_CODE (step) == INTEGER_CST)
    {
      if (TYPE_UNSIGNED (TREE_TYPE (step)))
	{
	  step1 = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
	  if (tree_int_cst_lt (step1, step))
	    {
	      incr_op = MINUS_EXPR;
	      step = step1;
	    }
	}
      else
	{
	  bool ovf;

	  if (!tree_expr_nonnegative_warnv_p (step, &ovf)
	      && may_negate_without_overflow_p (step))
	    {
	      incr_op = MINUS_EXPR;
	      step = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
	    }
	}
    }
  if (POINTER_TYPE_P (TREE_TYPE (base)))
    {
      if (TREE_CODE (base) == ADDR_EXPR)
	mark_addressable (TREE_OPERAND (base, 0));
      step = convert_to_ptrofftype (step);
      if (incr_op == MINUS_EXPR)
	step = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
      incr_op = POINTER_PLUS_EXPR;
    }
  /* Gimplify the step if necessary.  We put the computations in front of the
     loop (i.e. the step should be loop invariant).  */
  step = force_gimple_operand (step, &stmts, true, NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (pe, stmts);

  stmt = gimple_build_assign (va, incr_op, vb, step);
  /* Prevent the increment from inheriting a bogus location if it is not put
     immediately after a statement whose location is known.  */
  if (after)
    {
      if (gsi_end_p (*incr_pos)
	  || (is_gimple_debug (gsi_stmt (*incr_pos))
	      && gsi_bb (*incr_pos)
	      && gsi_end_p (gsi_last_nondebug_bb (gsi_bb (*incr_pos)))))
	{
	  edge e = single_succ_edge (gsi_bb (*incr_pos));
	  gimple_set_location (stmt, e->goto_locus);
	}
      gsi_insert_after (incr_pos, stmt, GSI_NEW_STMT);
    }
  else
    {
      gimple_stmt_iterator gsi = *incr_pos;
      if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
	gsi_next_nondebug (&gsi);
      if (!gsi_end_p (gsi))
	gimple_set_location (stmt, gimple_location (gsi_stmt (gsi)));
      gsi_insert_before (incr_pos, stmt, GSI_NEW_STMT);
    }

  initial = force_gimple_operand (base, &stmts, true, var);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (pe, stmts);

  phi = create_phi_node (vb, loop->header);
  add_phi_arg (phi, initial, loop_preheader_edge (loop), UNKNOWN_LOCATION);
  add_phi_arg (phi, va, loop_latch_edge (loop), UNKNOWN_LOCATION);
}

/* Return the superloop of USE_LOOP whose parent is the nearest common
   ancestor, in the loop tree, of USE_LOOP and DEF_LOOP; i.e. the innermost
   superloop of USE_LOOP that is a sibling of a superloop of DEF_LOOP.  */
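
/* For example, in the nest

     loop 1
       loop 2   <- DEF_LOOP
     loop 3
       loop 4   <- USE_LOOP

   find_sibling_superloop (loop 4, loop 2) returns loop 3: both arguments
   are first brought to the same depth, then walked up the loop tree in
   lockstep until their parents coincide.  */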

static inline class loop *
find_sibling_superloop (class loop *use_loop, class loop *def_loop)
{
  unsigned ud = loop_depth (use_loop);
  unsigned dd = loop_depth (def_loop);
  gcc_assert (ud > 0 && dd > 0);
  if (ud > dd)
    use_loop = superloop_at_depth (use_loop, dd);
  if (ud < dd)
    def_loop = superloop_at_depth (def_loop, ud);
  while (loop_outer (use_loop) != loop_outer (def_loop))
    {
      use_loop = loop_outer (use_loop);
      def_loop = loop_outer (def_loop);
      gcc_assert (use_loop && def_loop);
    }
  return use_loop;
}

/* DEF_BB is a basic block containing a DEF that needs rewriting into
   loop-closed SSA form.  USE_BLOCKS is the set of basic blocks containing
   uses of DEF that "escape" from the loop containing DEF_BB (i.e. blocks in
   USE_BLOCKS are dominated by DEF_BB but not in the loop father of DEF_BB).
   LOOP_EXITS[I] is the set of all basic blocks that exit loop I.

   Compute the subset of LOOP_EXITS that exit the loop containing DEF_BB
   or one of its loop fathers, in which DEF is live.  This set is returned
   in the bitmap LIVE_EXITS.

   Instead of computing the complete livein set of the def, we use the loop
   nesting tree as a form of poor man's structure analysis.  This greatly
   speeds up the analysis, which is important because this function may be
   called on all SSA names that need rewriting, one at a time.  */
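
/* In other words, the first phase below propagates liveness backward from
   the blocks in USE_BLOCKS (normalizing blocks in sibling loops to the
   sibling loop's header), and the second phase intersects the result with
   the exit destinations of DEF_BB's loop and of its superloops.  */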

static void
compute_live_loop_exits (bitmap live_exits, bitmap use_blocks,
			 bitmap *loop_exits, basic_block def_bb)
{
  unsigned i;
  bitmap_iterator bi;
  class loop *def_loop = def_bb->loop_father;
  unsigned def_loop_depth = loop_depth (def_loop);
  bitmap def_loop_exits;

  /* Normally the work list size is bounded by the number of basic
     blocks in the largest loop.  We don't know this number, but we
     can be fairly sure that it will be relatively small.  */
  auto_vec<basic_block> worklist (MAX (8, n_basic_blocks_for_fn (cfun) / 128));

  EXECUTE_IF_SET_IN_BITMAP (use_blocks, 0, i, bi)
    {
      basic_block use_bb = BASIC_BLOCK_FOR_FN (cfun, i);
      class loop *use_loop = use_bb->loop_father;
      gcc_checking_assert (def_loop != use_loop
			   && ! flow_loop_nested_p (def_loop, use_loop));
      if (! flow_loop_nested_p (use_loop, def_loop))
	use_bb = find_sibling_superloop (use_loop, def_loop)->header;
      if (bitmap_set_bit (live_exits, use_bb->index))
	worklist.safe_push (use_bb);
    }

  /* Iterate until the worklist is empty.  */
  while (! worklist.is_empty ())
    {
      edge e;
      edge_iterator ei;

      /* Pull a block off the worklist.  */
      basic_block bb = worklist.pop ();

      /* Make sure we have at least enough room in the work list
	 for all predecessors of this block.  */
      worklist.reserve (EDGE_COUNT (bb->preds));

      /* For each predecessor block.  */
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  basic_block pred = e->src;
	  class loop *pred_loop = pred->loop_father;
	  unsigned pred_loop_depth = loop_depth (pred_loop);
	  bool pred_visited;

	  /* We should have met DEF_BB along the way.  */
	  gcc_assert (pred != ENTRY_BLOCK_PTR_FOR_FN (cfun));

	  if (pred_loop_depth >= def_loop_depth)
	    {
	      if (pred_loop_depth > def_loop_depth)
		pred_loop = superloop_at_depth (pred_loop, def_loop_depth);
	      /* If we've reached DEF_LOOP, our train ends here.  */
	      if (pred_loop == def_loop)
		continue;
	    }
	  else if (! flow_loop_nested_p (pred_loop, def_loop))
	    pred = find_sibling_superloop (pred_loop, def_loop)->header;

	  /* Add PRED to the LIVEIN set.  PRED_VISITED is true if
	     we had already added PRED to LIVEIN before.  */
	  pred_visited = !bitmap_set_bit (live_exits, pred->index);

	  /* If we have visited PRED before, don't add it to the worklist.
	     If BB dominates PRED, then we're probably looking at a loop.
	     We're only interested in looking up in the dominance tree
	     because DEF_BB dominates all the uses.  */
	  if (pred_visited || dominated_by_p (CDI_DOMINATORS, pred, bb))
	    continue;

	  worklist.quick_push (pred);
	}
    }

  def_loop_exits = BITMAP_ALLOC (&loop_renamer_obstack);
  for (class loop *loop = def_loop;
       loop != current_loops->tree_root;
       loop = loop_outer (loop))
    bitmap_ior_into (def_loop_exits, loop_exits[loop->num]);
  bitmap_and_into (live_exits, def_loop_exits);
  BITMAP_FREE (def_loop_exits);
}

/* Add a loop-closing PHI for VAR in basic block EXIT.  */
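
/* For example (names are illustrative), for VAR = x_1 and an EXIT block
   with two incoming edges E1 and E2, this creates

     x_2 = PHI <x_1(E1), x_1(E2)>

   where x_2 is a new definition created for x_1 by create_new_def_for.  */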

static void
add_exit_phi (basic_block exit, tree var)
{
  gphi *phi;
  edge e;
  edge_iterator ei;

  /* Check that at least one of the edges entering the EXIT block exits
     the loop, or a superloop of that loop, that VAR is defined in.  */
  if (flag_checking)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (var);
      basic_block def_bb = gimple_bb (def_stmt);
      FOR_EACH_EDGE (e, ei, exit->preds)
	{
	  class loop *aloop = find_common_loop (def_bb->loop_father,
						e->src->loop_father);
	  if (!flow_bb_inside_loop_p (aloop, e->dest))
	    break;
	}
      gcc_assert (e);
    }

  phi = create_phi_node (NULL_TREE, exit);
  create_new_def_for (var, phi, gimple_phi_result_ptr (phi));
  FOR_EACH_EDGE (e, ei, exit->preds)
    add_phi_arg (phi, var, e, UNKNOWN_LOCATION);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, ";; Created LCSSA PHI: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }
}

/* Add exit phis for VAR that is used outside of its defining loop, in the
   basic blocks recorded in USE_BLOCKS.  Exits of the loops are stored in
   LOOP_EXITS.  */

static void
add_exit_phis_var (tree var, bitmap use_blocks, bitmap *loop_exits)
{
  unsigned index;
  bitmap_iterator bi;
  basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
  bitmap live_exits = BITMAP_ALLOC (&loop_renamer_obstack);

  gcc_checking_assert (! bitmap_bit_p (use_blocks, def_bb->index));

  compute_live_loop_exits (live_exits, use_blocks, loop_exits, def_bb);

  EXECUTE_IF_SET_IN_BITMAP (live_exits, 0, index, bi)
    {
      add_exit_phi (BASIC_BLOCK_FOR_FN (cfun, index), var);
    }

  BITMAP_FREE (live_exits);
}

/* Add exit phis for the names marked in NAMES_TO_RENAME.
   Exits of the loops are stored in LOOP_EXITS.  Sets of blocks where the
   SSA names are used are stored in USE_BLOCKS.  */

static void
add_exit_phis (bitmap names_to_rename, bitmap *use_blocks, bitmap *loop_exits)
{
  unsigned i;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (names_to_rename, 0, i, bi)
    {
      add_exit_phis_var (ssa_name (i), use_blocks[i], loop_exits);
    }
}

/* Fill the array of bitmaps LOOP_EXITS with all loop exit edge targets.  */

static void
get_loops_exits (bitmap *loop_exits)
{
  unsigned j;
  edge e;

  for (auto loop : loops_list (cfun, 0))
    {
      auto_vec<edge> exit_edges = get_loop_exit_edges (loop);
      loop_exits[loop->num] = BITMAP_ALLOC (&loop_renamer_obstack);
      FOR_EACH_VEC_ELT (exit_edges, j, e)
	bitmap_set_bit (loop_exits[loop->num], e->dest->index);
    }
}

/* For USE in BB, if it is used outside of the loop it is defined in,
   mark it for rewrite.  Record basic block BB, where it is used, in
   USE_BLOCKS, and record the SSA name's version in the NEED_PHIS bitmap.
   Note that for USEs in phis, BB should be the src of the edge corresponding
   to the use, rather than the bb containing the phi.  */

static void
find_uses_to_rename_use (basic_block bb, tree use, bitmap *use_blocks,
			 bitmap need_phis)
{
  unsigned ver;
  basic_block def_bb;
  class loop *def_loop;

  if (TREE_CODE (use) != SSA_NAME)
    return;

  ver = SSA_NAME_VERSION (use);
  def_bb = gimple_bb (SSA_NAME_DEF_STMT (use));
  if (!def_bb)
    return;
  def_loop = def_bb->loop_father;

  /* If the definition is not inside a loop, it is not interesting.  */
  if (!loop_outer (def_loop))
    return;

  /* If the use is not outside of the loop it is defined in, it is not
     interesting.  */
  if (flow_bb_inside_loop_p (def_loop, bb))
    return;

  /* If we're seeing VER for the first time, we still have to allocate
     a bitmap for its uses.  */
  if (bitmap_set_bit (need_phis, ver))
    use_blocks[ver] = BITMAP_ALLOC (&loop_renamer_obstack);
  bitmap_set_bit (use_blocks[ver], bb->index);
}

/* For uses matching USE_FLAGS in STMT, mark names that are used outside of
   the loop they are defined in for rewrite.  Record the set of blocks in
   which the SSA names are used in USE_BLOCKS, and the SSA names themselves
   in NEED_PHIS.  */

static void
find_uses_to_rename_stmt (gimple *stmt, bitmap *use_blocks, bitmap need_phis,
			  int use_flags)
{
  ssa_op_iter iter;
  tree var;
  basic_block bb = gimple_bb (stmt);

  if (is_gimple_debug (stmt))
    return;

  /* The FOR_EACH_SSA_TREE_OPERAND iterator does not allow
     SSA_OP_VIRTUAL_USES only.  */
  if (use_flags == SSA_OP_VIRTUAL_USES)
    {
      tree vuse = gimple_vuse (stmt);
      if (vuse != NULL_TREE)
	find_uses_to_rename_use (bb, gimple_vuse (stmt), use_blocks, need_phis);
    }
  else
    FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, use_flags)
      find_uses_to_rename_use (bb, var, use_blocks, need_phis);
}

/* Marks names matching USE_FLAGS that are used in BB and outside of the loop
   they are defined in for rewrite.  Records the set of blocks in which the
   SSA names are used in USE_BLOCKS.  Records the SSA names that will need
   exit PHIs in NEED_PHIS.  */

static void
find_uses_to_rename_bb (basic_block bb, bitmap *use_blocks, bitmap need_phis,
			int use_flags)
{
  edge e;
  edge_iterator ei;
  bool do_virtuals = (use_flags & SSA_OP_VIRTUAL_USES) != 0;
  bool do_nonvirtuals = (use_flags & SSA_OP_USE) != 0;

  FOR_EACH_EDGE (e, ei, bb->succs)
    for (gphi_iterator bsi = gsi_start_phis (e->dest); !gsi_end_p (bsi);
	 gsi_next (&bsi))
      {
	gphi *phi = bsi.phi ();
	bool virtual_p = virtual_operand_p (gimple_phi_result (phi));
	if ((virtual_p && do_virtuals)
	    || (!virtual_p && do_nonvirtuals))
	  find_uses_to_rename_use (bb, PHI_ARG_DEF_FROM_EDGE (phi, e),
				   use_blocks, need_phis);
      }

  for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
       gsi_next (&bsi))
    find_uses_to_rename_stmt (gsi_stmt (bsi), use_blocks, need_phis,
			      use_flags);
}

/* Marks names matching USE_FLAGS that are used outside of the loop they are
   defined in for rewrite.  Records the set of blocks in which the SSA names
   are used in USE_BLOCKS.  Records the SSA names that will need exit PHIs in
   NEED_PHIS.  If CHANGED_BBS is not NULL, scan only blocks in this set.  */

static void
find_uses_to_rename (bitmap changed_bbs, bitmap *use_blocks, bitmap need_phis,
		     int use_flags)
{
  basic_block bb;
  unsigned index;
  bitmap_iterator bi;

  if (changed_bbs)
    EXECUTE_IF_SET_IN_BITMAP (changed_bbs, 0, index, bi)
      {
	bb = BASIC_BLOCK_FOR_FN (cfun, index);
	if (bb)
	  find_uses_to_rename_bb (bb, use_blocks, need_phis, use_flags);
      }
  else
    FOR_EACH_BB_FN (bb, cfun)
      find_uses_to_rename_bb (bb, use_blocks, need_phis, use_flags);
}

/* Mark uses of DEF that occur outside of the loop it is defined in for
   rewrite.  Record the set of blocks in which the SSA names are used in
   USE_BLOCKS.  Record the SSA names that will need exit PHIs in
   NEED_PHIS.  */

static void
find_uses_to_rename_def (tree def, bitmap *use_blocks, bitmap need_phis)
{
  gimple *use_stmt;
  imm_use_iterator imm_iter;

  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, def)
    {
      if (is_gimple_debug (use_stmt))
	continue;

      basic_block use_bb = gimple_bb (use_stmt);

      use_operand_p use_p;
      FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
	{
	  if (gimple_code (use_stmt) == GIMPLE_PHI)
	    {
	      edge e = gimple_phi_arg_edge (as_a <gphi *> (use_stmt),
					    PHI_ARG_INDEX_FROM_USE (use_p));
	      use_bb = e->src;
	    }
	  find_uses_to_rename_use (use_bb, USE_FROM_PTR (use_p), use_blocks,
				   need_phis);
	}
    }
}

/* Marks names matching USE_FLAGS that are defined in LOOP and used outside
   of it for rewrite.  Records the set of blocks in which the SSA names are
   used in USE_BLOCKS.  Records the SSA names that will need exit PHIs in
   NEED_PHIS.  */

static void
find_uses_to_rename_in_loop (class loop *loop, bitmap *use_blocks,
			     bitmap need_phis, int use_flags)
{
  bool do_virtuals = (use_flags & SSA_OP_VIRTUAL_USES) != 0;
  bool do_nonvirtuals = (use_flags & SSA_OP_USE) != 0;
  int def_flags = ((do_virtuals ? SSA_OP_VIRTUAL_DEFS : 0)
		   | (do_nonvirtuals ? SSA_OP_DEF : 0));

  basic_block *bbs = get_loop_body (loop);

  for (unsigned int i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
	   gsi_next (&bsi))
	{
	  gphi *phi = bsi.phi ();
	  tree res = gimple_phi_result (phi);
	  bool virtual_p = virtual_operand_p (res);
	  if ((virtual_p && do_virtuals)
	      || (!virtual_p && do_nonvirtuals))
	    find_uses_to_rename_def (res, use_blocks, need_phis);
	}

      for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
	   gsi_next (&bsi))
	{
	  gimple *stmt = gsi_stmt (bsi);
	  /* The FOR_EACH_SSA_TREE_OPERAND iterator does not allow
	     SSA_OP_VIRTUAL_DEFS only.  */
	  if (def_flags == SSA_OP_VIRTUAL_DEFS)
	    {
	      tree vdef = gimple_vdef (stmt);
	      if (vdef != NULL)
		find_uses_to_rename_def (vdef, use_blocks, need_phis);
	    }
	  else
	    {
	      tree var;
	      ssa_op_iter iter;
	      FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, def_flags)
		find_uses_to_rename_def (var, use_blocks, need_phis);
	    }
	}
    }

  XDELETEVEC (bbs);
}

/* Rewrites the program into a loop closed ssa form -- i.e. inserts extra
   phi nodes to ensure that no variable is used outside the loop it is
   defined in.

   This strengthening of the basic ssa form has several advantages:

   1) Updating it during unrolling/peeling/versioning is trivial, since
      we do not need to care about the uses outside of the loop.
      The same applies to virtual operands which are also rewritten into
      loop closed SSA form.  Note that virtual operands are always live
      until function exit.
   2) The behavior of all uses of an induction variable is the same.
      Without this, you need to distinguish the case when the variable
      is used outside of the loop it is defined in, for example

      for (i = 0; i < 100; i++)
	{
	  for (j = 0; j < 100; j++)
	    {
	      k = i + j;
	      use1 (k);
	    }
	  use2 (k);
	}

      Looking from the outer loop with the normal SSA form, the first use of k
      is not well-behaved, while the second one is an induction variable with
      base 99 and step 1.
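
      After rewriting into loop closed SSA form, the outer-loop use of k
      goes through an exit PHI of the inner loop, e.g.

	k' = PHI <k(inner loop exit)>;
	use2 (k');

      so every name is used only inside the loop that defines it (names are
      illustrative).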

   If LOOP is non-null, only rewrite uses that have defs in LOOP.  Otherwise,
   if CHANGED_BBS is not NULL, we look for uses outside loops only in the
   basic blocks in this set.

   USE_FLAGS allows us to specify whether we want virtual, non-virtual or
   both variables rewritten.

   UPDATE_FLAG is used in the call to update_ssa.  See
   TODO_update_ssa* for documentation.  */

void
rewrite_into_loop_closed_ssa_1 (bitmap changed_bbs, unsigned update_flag,
				int use_flags, class loop *loop)
{
  bitmap *use_blocks;
  bitmap names_to_rename;

  loops_state_set (LOOP_CLOSED_SSA);
  if (number_of_loops (cfun) <= 1)
    return;

  /* If the pass has caused the SSA form to be out-of-date, update it
     now.  */
  if (update_flag != 0)
    update_ssa (update_flag);
  else if (flag_checking)
    verify_ssa (true, true);

  bitmap_obstack_initialize (&loop_renamer_obstack);

  names_to_rename = BITMAP_ALLOC (&loop_renamer_obstack);

  /* Uses of names to rename.  We don't have to initialize this array,
     because we know that we will only have entries for the SSA names
     in NAMES_TO_RENAME.  */
  use_blocks = XNEWVEC (bitmap, num_ssa_names);

  if (loop != NULL)
    {
      gcc_assert (changed_bbs == NULL);
      find_uses_to_rename_in_loop (loop, use_blocks, names_to_rename,
				   use_flags);
    }
  else
    {
      gcc_assert (loop == NULL);
      find_uses_to_rename (changed_bbs, use_blocks, names_to_rename,
			   use_flags);
    }

  if (!bitmap_empty_p (names_to_rename))
    {
      /* An array of bitmaps where LOOP_EXITS[I] is the set of basic blocks
	 that are the destination of an edge exiting loop number I.  */
      bitmap *loop_exits = XNEWVEC (bitmap, number_of_loops (cfun));
      get_loops_exits (loop_exits);

      /* Add the PHI nodes on exits of the loops for the names we need to
	 rewrite.  */
      add_exit_phis (names_to_rename, use_blocks, loop_exits);

      free (loop_exits);

      /* Fix up all the names found to be used outside their original
	 loops.  */
      update_ssa (TODO_update_ssa);
    }

  bitmap_obstack_release (&loop_renamer_obstack);
  free (use_blocks);
}

/* Rewrites the non-virtual defs and uses into a loop closed ssa form.  If
   CHANGED_BBS is not NULL, we look for uses outside loops only in the basic
   blocks in this set.  UPDATE_FLAG is used in the call to update_ssa.  See
   TODO_update_ssa* for documentation.  */

void
rewrite_into_loop_closed_ssa (bitmap changed_bbs, unsigned update_flag)
{
  rewrite_into_loop_closed_ssa_1 (changed_bbs, update_flag, SSA_OP_USE, NULL);
}

/* Rewrites virtual defs and uses with def in LOOP into loop closed ssa
   form.  */

void
rewrite_virtuals_into_loop_closed_ssa (class loop *loop)
{
  rewrite_into_loop_closed_ssa_1 (NULL, 0, SSA_OP_VIRTUAL_USES, loop);
}

/* Check invariants of the loop closed ssa form for the def in DEF_BB.  */

static void
check_loop_closed_ssa_def (basic_block def_bb, tree def)
{
  use_operand_p use_p;
  imm_use_iterator iterator;
  FOR_EACH_IMM_USE_FAST (use_p, iterator, def)
    {
      if (is_gimple_debug (USE_STMT (use_p)))
	continue;

      basic_block use_bb = gimple_bb (USE_STMT (use_p));
      if (is_a <gphi *> (USE_STMT (use_p)))
	use_bb = EDGE_PRED (use_bb, PHI_ARG_INDEX_FROM_USE (use_p))->src;

      gcc_assert (flow_bb_inside_loop_p (def_bb->loop_father, use_bb));
    }
}

/* Checks invariants of loop closed ssa form in BB.  */

static void
check_loop_closed_ssa_bb (basic_block bb)
{
  for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
       gsi_next (&bsi))
    {
      gphi *phi = bsi.phi ();

      if (!virtual_operand_p (PHI_RESULT (phi)))
	check_loop_closed_ssa_def (bb, PHI_RESULT (phi));
    }

  for (gimple_stmt_iterator bsi = gsi_start_nondebug_bb (bb); !gsi_end_p (bsi);
       gsi_next_nondebug (&bsi))
    {
      ssa_op_iter iter;
      tree var;
      gimple *stmt = gsi_stmt (bsi);

      FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, SSA_OP_DEF)
	check_loop_closed_ssa_def (bb, var);
    }
}

/* Checks that invariants of the loop closed ssa form are preserved.
   Call verify_ssa when VERIFY_SSA_P is true.  Note all loops are checked
   if LOOP is NULL, otherwise, only LOOP is checked.  */

DEBUG_FUNCTION void
verify_loop_closed_ssa (bool verify_ssa_p, class loop *loop)
{
  if (number_of_loops (cfun) <= 1)
    return;

  if (verify_ssa_p)
    verify_ssa (false, true);

  timevar_push (TV_VERIFY_LOOP_CLOSED);

  if (loop == NULL)
    {
      basic_block bb;

      FOR_EACH_BB_FN (bb, cfun)
	if (bb->loop_father && bb->loop_father->num > 0)
	  check_loop_closed_ssa_bb (bb);
    }
  else
    {
      basic_block *bbs = get_loop_body (loop);

      for (unsigned i = 0; i < loop->num_nodes; ++i)
	check_loop_closed_ssa_bb (bbs[i]);

      free (bbs);
    }

  timevar_pop (TV_VERIFY_LOOP_CLOSED);
}

/* Split the loop exit edge EXIT.  Things are complicated slightly by the
   need to preserve the loop closed ssa form.  If COPY_CONSTANTS_P is true
   then forwarder PHIs are also created for constant arguments.
   The newly created block is returned.  */
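
/* For instance (a sketch with illustrative names), a PHI

     x_3 = PHI <x_1(EXIT), ...>

   in the original exit destination becomes

     new block BB:  x_4 = PHI <x_1(EXIT)>
     old block:     x_3 = PHI <x_4(single_succ_edge (BB)), ...>

   where x_4 is a fresh name duplicated from x_3; constant PHI arguments
   get such a forwarder PHI only when COPY_CONSTANTS_P.  */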

basic_block
split_loop_exit_edge (edge exit, bool copy_constants_p)
{
  basic_block dest = exit->dest;
  basic_block bb = split_edge (exit);
  gphi *phi, *new_phi;
  tree new_name, name;
  use_operand_p op_p;
  gphi_iterator psi;
  location_t locus;

  for (psi = gsi_start_phis (dest); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = psi.phi ();
      op_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (bb));
      locus = gimple_phi_arg_location_from_edge (phi, single_succ_edge (bb));

      name = USE_FROM_PTR (op_p);

      /* If the argument of the PHI node is a constant, we do not need
	 to keep it inside loop.  */
      if (TREE_CODE (name) != SSA_NAME
	  && !copy_constants_p)
	continue;

      /* Otherwise create an auxiliary phi node that will copy the value
	 of the SSA name out of the loop.  */
      new_name = duplicate_ssa_name (PHI_RESULT (phi), NULL);
      new_phi = create_phi_node (new_name, bb);
      add_phi_arg (new_phi, name, exit, locus);
      SET_USE (op_p, new_name);
    }

  return bb;
}

/* Returns the basic block in which statements should be emitted for
   induction variables incremented at the end of the LOOP.  */

basic_block
ip_end_pos (class loop *loop)
{
  return loop->latch;
}

/* Returns the basic block in which statements should be emitted for
   induction variables incremented just before the exit condition of
   LOOP.  */

basic_block
ip_normal_pos (class loop *loop)
{
  gimple *last;
  basic_block bb;
  edge exit;

  if (!single_pred_p (loop->latch))
    return NULL;

  bb = single_pred (loop->latch);
  last = last_stmt (bb);
  if (!last
      || gimple_code (last) != GIMPLE_COND)
    return NULL;

  exit = EDGE_SUCC (bb, 0);
  if (exit->dest == loop->latch)
    exit = EDGE_SUCC (bb, 1);

  if (flow_bb_inside_loop_p (loop, exit->dest))
    return NULL;

  return bb;
}

/* Stores the standard position for induction variable increment in LOOP
   (just before the exit condition if it is available and latch block is
   empty, end of the latch block otherwise) to BSI.  INSERT_AFTER is set to
   true if the increment should be inserted after *BSI.  */

void
standard_iv_increment_position (class loop *loop, gimple_stmt_iterator *bsi,
				bool *insert_after)
{
  basic_block bb = ip_normal_pos (loop), latch = ip_end_pos (loop);
  gimple *last = last_stmt (latch);

  if (!bb
      || (last && gimple_code (last) != GIMPLE_LABEL))
    {
      *bsi = gsi_last_bb (latch);
      *insert_after = true;
    }
  else
    {
      *bsi = gsi_last_bb (bb);
      *insert_after = false;
    }
}

/* Copies phi node arguments for duplicated blocks.  The index of the first
   duplicated block is FIRST_NEW_BLOCK.  */

static void
copy_phi_node_args (unsigned first_new_block)
{
  unsigned i;

  for (i = first_new_block; i < (unsigned) last_basic_block_for_fn (cfun); i++)
    BASIC_BLOCK_FOR_FN (cfun, i)->flags |= BB_DUPLICATED;

  for (i = first_new_block; i < (unsigned) last_basic_block_for_fn (cfun); i++)
    add_phi_args_after_copy_bb (BASIC_BLOCK_FOR_FN (cfun, i));

  for (i = first_new_block; i < (unsigned) last_basic_block_for_fn (cfun); i++)
    BASIC_BLOCK_FOR_FN (cfun, i)->flags &= ~BB_DUPLICATED;
}

/* The same as cfgloopmanip.cc:duplicate_loop_body_to_header_edge, but also
   updates the PHI nodes at start of the copied region.  In order to
   achieve this, only loops whose exits all lead to the same location
   are handled.

   Notice that we do not completely update the SSA web after
   duplication.  The caller is responsible for calling update_ssa
   after the loop has been duplicated.  */

bool
gimple_duplicate_loop_body_to_header_edge (class loop *loop, edge e,
					   unsigned int ndupl,
					   sbitmap wont_exit, edge orig,
					   vec<edge> *to_remove, int flags)
{
  unsigned first_new_block;

  if (!loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
    return false;
  if (!loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS))
    return false;

  first_new_block = last_basic_block_for_fn (cfun);
  if (!duplicate_loop_body_to_header_edge (loop, e, ndupl, wont_exit, orig,
					   to_remove, flags))
    return false;

  /* Readd the removed phi args for e.  */
  flush_pending_stmts (e);

  /* Copy the phi node arguments.  */
  copy_phi_node_args (first_new_block);

  scev_reset ();

  return true;
}

/* Returns true if we can unroll LOOP FACTOR times.  The number of
   iterations of the loop is returned in NITER.  */

bool
can_unroll_loop_p (class loop *loop, unsigned factor,
		   class tree_niter_desc *niter)
{
  edge exit;

  /* Check whether unrolling is possible.  We only want to unroll loops
     for which we are able to determine the number of iterations.  We also
     want to split the extra iterations of the loop from its end,
     therefore we require that the loop has precisely one
     exit.  */

  exit = single_dom_exit (loop);
  if (!exit)
    return false;

  if (!number_of_iterations_exit (loop, exit, niter, false)
      || niter->cmp == ERROR_MARK
      /* Scalar evolutions analysis might have copy propagated
	 the abnormal ssa names into these expressions, hence
	 emitting the computations based on them during loop
	 unrolling might create overlapping life ranges for
	 them, and failures in out-of-ssa.  */
      || contains_abnormal_ssa_name_p (niter->may_be_zero)
      || contains_abnormal_ssa_name_p (niter->control.base)
      || contains_abnormal_ssa_name_p (niter->control.step)
      || contains_abnormal_ssa_name_p (niter->bound))
    return false;

  /* And of course, we must be able to duplicate the loop.  */
  if (!can_duplicate_loop_p (loop))
    return false;

  /* The final loop should be small enough.  */
  if (tree_num_loop_insns (loop, &eni_size_weights) * factor
      > (unsigned) param_max_unrolled_insns)
    return false;

  return true;
}

/* Determines the conditions that control execution of LOOP unrolled FACTOR
   times.  DESC describes the number of iterations of LOOP.  ENTER_COND is
   set to the condition that must be true if the main loop can be entered.
   If the loop does not always iterate an exact multiple of FACTOR times,
   EXIT_BASE, EXIT_STEP, EXIT_CMP and EXIT_BOUND are set to values describing
   how the exit from the unrolled loop should be controlled.  Otherwise,
   the trees are set to null and EXIT_CMP is set to ERROR_MARK.  */
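
/* As an illustration (a sketch, ignoring MAY_BE_ZERO), consider

     for (i = 0; i < n; i++)

   unrolled FACTOR = 4 times, so that BIGSTEP = 4 and DELTA = 3.  The
   computed ENTER_COND is roughly

     n >= TYPE_MIN + 3 && 0 < n - 3

   (the bound adjustment must not overflow, and at least FACTOR iterations
   must remain), and the unrolled loop keeps iterating while its control IV
   satisfies i < n - 3, i.e. EXIT_BASE = 0, EXIT_STEP = 4,
   EXIT_CMP = LT_EXPR and EXIT_BOUND = n - 3.  */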

static void
determine_exit_conditions (class loop *loop, class tree_niter_desc *desc,
			   unsigned factor, tree *enter_cond,
			   tree *exit_base, tree *exit_step,
			   enum tree_code *exit_cmp, tree *exit_bound)
{
  gimple_seq stmts;
  tree base = desc->control.base;
  tree step = desc->control.step;
  tree bound = desc->bound;
  tree type = TREE_TYPE (step);
  tree bigstep, delta;
  tree min = lower_bound_in_type (type, type);
  tree max = upper_bound_in_type (type, type);
  enum tree_code cmp = desc->cmp;
  tree cond = boolean_true_node, assum;

  /* For pointers, do the arithmetic in the type of step.  */
  base = fold_convert (type, base);
  bound = fold_convert (type, bound);

  *enter_cond = boolean_false_node;
  *exit_base = NULL_TREE;
  *exit_step = NULL_TREE;
  *exit_cmp = ERROR_MARK;
  *exit_bound = NULL_TREE;
  gcc_assert (cmp != ERROR_MARK);

  /* We only need to be correct when we answer the question
     "Do at least FACTOR more iterations remain?" in the unrolled loop.
     Thus, transforming BASE + STEP * i <> BOUND to
     BASE + STEP * i < BOUND is ok.  */
  if (cmp == NE_EXPR)
    {
      if (tree_int_cst_sign_bit (step))
	cmp = GT_EXPR;
      else
	cmp = LT_EXPR;
    }
  else if (cmp == LT_EXPR)
    {
      gcc_assert (!tree_int_cst_sign_bit (step));
    }
  else if (cmp == GT_EXPR)
    {
      gcc_assert (tree_int_cst_sign_bit (step));
    }
  else
    gcc_unreachable ();

  /* The main body of the loop may be entered iff:

     1) desc->may_be_zero is false.
     2) it is possible to check that there are at least FACTOR iterations
	of the loop, i.e., BOUND - step * FACTOR does not overflow.
     3) the number of iterations is at least FACTOR  */

  if (!integer_zerop (desc->may_be_zero))
    cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
			invert_truthvalue (desc->may_be_zero),
			cond);

  bigstep = fold_build2 (MULT_EXPR, type, step,
			 build_int_cst_type (type, factor));
  delta = fold_build2 (MINUS_EXPR, type, bigstep, step);
  if (cmp == LT_EXPR)
    assum = fold_build2 (GE_EXPR, boolean_type_node,
			 bound,
			 fold_build2 (PLUS_EXPR, type, min, delta));
  else
    assum = fold_build2 (LE_EXPR, boolean_type_node,
			 bound,
			 fold_build2 (PLUS_EXPR, type, max, delta));
  cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, assum, cond);

  bound = fold_build2 (MINUS_EXPR, type, bound, delta);
  assum = fold_build2 (cmp, boolean_type_node, base, bound);
  cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, assum, cond);

  if (integer_nonzerop (cond)
      && integer_zerop (desc->may_be_zero))
    {
      /* Convert the latch count to an iteration count.  */
      tree niter = fold_build2 (PLUS_EXPR, type, desc->niter,
				build_one_cst (type));
      if (multiple_of_p (type, niter, bigstep))
	return;
    }

  cond = force_gimple_operand (unshare_expr (cond), &stmts, false, NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
  /* cond now may be a gimple comparison, which would be OK, but also any
     other gimple rhs (say a && b).  In that case we need to force it into
     an operand.  */
  if (!is_gimple_condexpr (cond))
    {
      cond = force_gimple_operand (cond, &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
    }
  *enter_cond = cond;

  base = force_gimple_operand (unshare_expr (base), &stmts, true, NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
  bound = force_gimple_operand (unshare_expr (bound), &stmts, true, NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

  *exit_base = base;
  *exit_step = bigstep;
  *exit_cmp = cmp;
  *exit_bound = bound;
}

/* Scales the frequencies of all basic blocks in LOOP that are strictly
   dominated by BB by NUM/DEN.  */

static void
scale_dominated_blocks_in_loop (class loop *loop, basic_block bb,
				profile_count num, profile_count den)
{
  basic_block son;

  if (!den.nonzero_p () && !(num == profile_count::zero ()))
    return;

  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    {
      if (!flow_bb_inside_loop_p (loop, son))
	continue;
      scale_bbs_frequencies_profile_count (&son, 1, num, den);
      scale_dominated_blocks_in_loop (loop, son, num, den);
    }
}

/* Return the estimated number of iterations ("niter") for LOOP after
   unrolling it FACTOR times.  */
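
/* For instance, with EST_NITER = 99 and FACTOR = 4 the unrolled loop is
   estimated to iterate 99 / 4 = 24 times; counting latch executions, that
   is CEIL (99 + 1, 4) - 1.  */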

gcov_type
niter_for_unrolled_loop (class loop *loop, unsigned factor)
{
  gcc_assert (factor != 0);
  bool profile_p = false;
  gcov_type est_niter = expected_loop_iterations_unbounded (loop, &profile_p);
  /* Note that this is really CEIL (est_niter + 1, factor) - 1, where the
     "+ 1" converts latch iterations to loop iterations and the "- 1"
     converts back.  */
  gcov_type new_est_niter = est_niter / factor;

  if (est_niter == -1)
    return -1;

  /* Without profile feedback, loops for which we do not know a better
     estimate are assumed to roll 10 times.  When we unroll such a loop, it
     appears to roll too little, and it may even seem to be cold.  To avoid
     this, we ensure that the created loop appears to roll at least 5 times
     (but at most as many times as before unrolling).  Don't do the
     adjustment if profile feedback is present.  */
  if (new_est_niter < 5 && !profile_p)
    {
      if (est_niter < 5)
	new_est_niter = est_niter;
      else
	new_est_niter = 5;
    }

  if (loop->any_upper_bound)
    {
      /* As above, this is really CEIL (upper_bound + 1, factor) - 1.  */
      widest_int bound = wi::udiv_floor (loop->nb_iterations_upper_bound,
					 factor);
      if (wi::ltu_p (bound, new_est_niter))
	new_est_niter = bound.to_uhwi ();
    }

  return new_est_niter;
}

/* Unroll LOOP FACTOR times.  LOOP is known to have a single exit edge
   whose source block dominates the latch.  DESC describes the number of
   iterations of LOOP.

   If N is the number of iterations of the loop and MAY_BE_ZERO is the
   condition under which the loop exits in the first iteration even if
   N != 0,

   while (1)
     {
       x = phi (init, next);

       pre;
       if (st)
	 break;
       post;
     }

   becomes (with possibly the exit conditions formulated a bit differently,
   avoiding the need to create a new iv):

   if (MAY_BE_ZERO || N < FACTOR)
     goto rest;

   do
     {
       x = phi (init, next);

       pre;
       post;
       pre;
       post;
       ...
       pre;
       post;
       N -= FACTOR;

     } while (N >= FACTOR);

   rest:
     init' = phi (init, x);

   while (1)
     {
       x = phi (init', next);

       pre;
       if (st)
	 break;
       post;
     }

   Before the loop is unrolled, TRANSFORM is called for it (only for the
   unrolled loop, but not for its versioned copy).  DATA is passed to
   TRANSFORM.  */

/* Probability in % that the unrolled loop is entered.  Just a guess.  */
#define PROB_UNROLLED_LOOP_ENTERED 90

void
tree_transform_and_unroll_loop (class loop *loop, unsigned factor,
				class tree_niter_desc *desc,
				transform_callback transform,
				void *data)
{
  gcov_type new_est_niter = niter_for_unrolled_loop (loop, factor);
  unsigned irr = loop_preheader_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP;

  enum tree_code exit_cmp;
  tree enter_main_cond, exit_base, exit_step, exit_bound;
  determine_exit_conditions (loop, desc, factor,
			     &enter_main_cond, &exit_base, &exit_step,
			     &exit_cmp, &exit_bound);
  bool single_loop_p = !exit_base;

  /* Let us assume that the unrolled loop is quite likely to be entered.  */
  profile_probability prob_entry;
  if (integer_nonzerop (enter_main_cond))
    prob_entry = profile_probability::always ();
  else
    prob_entry = profile_probability::guessed_always ()
		   .apply_scale (PROB_UNROLLED_LOOP_ENTERED, 100);

  gcond *exit_if = nullptr;
  class loop *new_loop = nullptr;
  edge new_exit;
  if (!single_loop_p)
    {
      edge exit = single_dom_exit (loop);

      /* The values for scales should keep profile consistent, and somewhat
	 close to correct.

	 TODO: The current value of SCALE_REST makes it appear that the loop
	 that is created by splitting the remaining iterations of the unrolled
	 loop is executed the same number of times as the original loop, and
	 with the same frequencies, which is obviously wrong.  This does not
	 appear to cause problems, so we do not bother with fixing it for now.
	 To make the profile correct, we would need to change the probability
	 of the exit edge of the loop, and recompute the distribution of
	 frequencies in its body because of this change (scale the frequencies
	 of blocks before and after the exit by appropriate factors).  */
      profile_probability scale_unrolled = prob_entry;
      new_loop = loop_version (loop, enter_main_cond, NULL, prob_entry,
			       prob_entry.invert (), scale_unrolled,
			       profile_probability::guessed_always (),
			       true);
      gcc_assert (new_loop != NULL);
      update_ssa (TODO_update_ssa);

      /* Prepare the cfg and update the phi nodes.  Move the loop exit to the
	 loop latch (and make its condition dummy, for the moment).  */
      basic_block rest = loop_preheader_edge (new_loop)->src;
      edge precond_edge = single_pred_edge (rest);
      split_edge (loop_latch_edge (loop));
      basic_block exit_bb = single_pred (loop->latch);

      /* Since the exit edge will be removed, the frequency of all the blocks
	 in the loop that are dominated by it must be scaled by
	 1 / (1 - exit->probability).  */
      if (exit->probability.initialized_p ())
	scale_dominated_blocks_in_loop (loop, exit->src,
					/* We are scaling up here so
					   probability does not fit.  */
					loop->header->count,
					loop->header->count
					- loop->header->count.apply_probability
					    (exit->probability));

      gimple_stmt_iterator bsi = gsi_last_bb (exit_bb);
      exit_if = gimple_build_cond (EQ_EXPR, integer_zero_node,
				   integer_zero_node,
				   NULL_TREE, NULL_TREE);

      gsi_insert_after (&bsi, exit_if, GSI_NEW_STMT);
      new_exit = make_edge (exit_bb, rest, EDGE_FALSE_VALUE | irr);
      rescan_loop_exit (new_exit, true, false);

      /* Set the probability of the new exit to the same value as that of
	 the old one.  Fix the frequency of the latch block by scaling it
	 back by 1 - exit->probability.  */
      new_exit->probability = exit->probability;
      edge new_nonexit = single_pred_edge (loop->latch);
      new_nonexit->probability = exit->probability.invert ();
      new_nonexit->flags = EDGE_TRUE_VALUE;
      if (new_nonexit->probability.initialized_p ())
	scale_bbs_frequencies (&loop->latch, 1, new_nonexit->probability);

      edge old_entry = loop_preheader_edge (loop);
      edge new_entry = loop_preheader_edge (new_loop);
      edge old_latch = loop_latch_edge (loop);
      for (gphi_iterator psi_old_loop = gsi_start_phis (loop->header),
	   psi_new_loop = gsi_start_phis (new_loop->header);
	   !gsi_end_p (psi_old_loop);
	   gsi_next (&psi_old_loop), gsi_next (&psi_new_loop))
	{
	  gphi *phi_old_loop = psi_old_loop.phi ();
	  gphi *phi_new_loop = psi_new_loop.phi ();

	  tree init = PHI_ARG_DEF_FROM_EDGE (phi_old_loop, old_entry);
	  use_operand_p op
	    = PHI_ARG_DEF_PTR_FROM_EDGE (phi_new_loop, new_entry);
	  gcc_assert (operand_equal_for_phi_arg_p (init, USE_FROM_PTR (op)));
	  tree next = PHI_ARG_DEF_FROM_EDGE (phi_old_loop, old_latch);

	  /* Prefer using original variable as a base for the new ssa name.
	     This is necessary for virtual ops, and useful in order to avoid
	     losing debug info for real ops.  */
	  tree new_init;
	  if (TREE_CODE (next) == SSA_NAME
	      && useless_type_conversion_p (TREE_TYPE (next),
					    TREE_TYPE (init)))
	    new_init = copy_ssa_name (next);
	  else if (TREE_CODE (init) == SSA_NAME
		   && useless_type_conversion_p (TREE_TYPE (init),
						 TREE_TYPE (next)))
	    new_init = copy_ssa_name (init);
	  else if (useless_type_conversion_p (TREE_TYPE (next),
					      TREE_TYPE (init)))
	    new_init = make_temp_ssa_name (TREE_TYPE (next), NULL,
					   "unrinittmp");
	  else
	    new_init = make_temp_ssa_name (TREE_TYPE (init), NULL,
					   "unrinittmp");

	  gphi *phi_rest = create_phi_node (new_init, rest);
	  add_phi_arg (phi_rest, init, precond_edge, UNKNOWN_LOCATION);
	  add_phi_arg (phi_rest, next, new_exit, UNKNOWN_LOCATION);
	  SET_USE (op, new_init);
	}

      remove_path (exit);
    }
  else
    new_exit = single_dom_exit (loop);

  /* Transform the loop.  */
  if (transform)
    (*transform) (loop, data);

  /* Unroll the loop and remove the exits in all iterations except for the
     last one.  */
  auto_sbitmap wont_exit (factor);
  bitmap_ones (wont_exit);
  bitmap_clear_bit (wont_exit, factor - 1);

  auto_vec<edge> to_remove;
  bool ok
    = gimple_duplicate_loop_body_to_header_edge (loop, loop_latch_edge (loop),
						 factor - 1, wont_exit,
						 new_exit, &to_remove,
						 DLTHE_FLAG_UPDATE_FREQ);
  gcc_assert (ok);

  for (edge e : to_remove)
    {
      ok = remove_path (e);
      gcc_assert (ok);
    }
  update_ssa (TODO_update_ssa);

  new_exit = single_dom_exit (loop);
  if (!single_loop_p)
    {
      /* Ensure that the frequencies in the loop match the new estimated
	 number of iterations, and change the probability of the new
	 exit edge.  */

      profile_count freq_h = loop->header->count;
      profile_count freq_e = (loop_preheader_edge (loop))->count ();
      if (freq_h.nonzero_p ())
	{
	  /* Avoid dropping loop body profile counter to 0 because of zero
	     count in loop's preheader.  */
	  if (freq_h.nonzero_p () && !(freq_e == profile_count::zero ()))
	    freq_e = freq_e.force_nonzero ();
	  scale_loop_frequencies (loop, freq_e.probability_in (freq_h));
	}

      basic_block rest = new_exit->dest;
      new_exit->probability = profile_probability::always ()
				.apply_scale (1, new_est_niter + 1);

      rest->count += new_exit->count ();

      edge new_nonexit = single_pred_edge (loop->latch);
      profile_probability prob = new_nonexit->probability;
      new_nonexit->probability = new_exit->probability.invert ();
      prob = new_nonexit->probability / prob;
      if (prob.initialized_p ())
	scale_bbs_frequencies (&loop->latch, 1, prob);

      /* Finally create the new counter for number of iterations and add
	 the new exit instruction.  */
      tree ctr_before, ctr_after;
      gimple_stmt_iterator bsi = gsi_last_nondebug_bb (new_exit->src);
      exit_if = as_a <gcond *> (gsi_stmt (bsi));
      create_iv (exit_base, exit_step, NULL_TREE, loop,
		 &bsi, false, &ctr_before, &ctr_after);
      gimple_cond_set_code (exit_if, exit_cmp);
      gimple_cond_set_lhs (exit_if, ctr_after);
      gimple_cond_set_rhs (exit_if, exit_bound);
      update_stmt (exit_if);
    }
  else
    {
      /* gimple_duplicate_loop_body_to_header_edge has adjusted the loop
	 body's original profile counts in line with the unroll factor.
	 However, the old counts might not have been consistent with the
	 old iteration count.

	 Therefore, if the iteration count is known exactly, make sure that
	 the profile counts of the loop header (and any other blocks that
	 might be executed in the final iteration) are consistent with the
	 combination of (a) the incoming profile count and (b) the new
	 iteration count.  */
      profile_count in_count = loop_preheader_edge (loop)->count ();
      profile_count old_header_count = loop->header->count;
      if (in_count.nonzero_p ()
	  && old_header_count.nonzero_p ()
	  && TREE_CODE (desc->niter) == INTEGER_CST)
	{
	  /* The + 1 converts latch counts to iteration counts.  */
	  profile_count new_header_count
	    = in_count.apply_scale (new_est_niter + 1, 1);
	  basic_block *body = get_loop_body (loop);
	  scale_bbs_frequencies_profile_count (body, loop->num_nodes,
					       new_header_count,
					       old_header_count);
	  free (body);
	}

      /* gimple_duplicate_loop_body_to_header_edge discarded FACTOR - 1
	 exit edges and adjusted the loop body's profile counts for the
	 new probabilities of the remaining non-exit edges.  However,
	 the remaining exit edge still has the same probability as it
	 did before, even though it is now more likely.

	 Therefore, all blocks executed after a failed exit test now have
	 a profile count that is too high, and the sum of the profile counts
	 for the header's incoming edges is greater than the profile count
	 of the header itself.

	 Adjust the profile counts of all code in the loop body after
	 the exit test so that the sum of the counts on entry to the
	 header agree.  */
      profile_count old_latch_count = loop_latch_edge (loop)->count ();
      profile_count new_latch_count = loop->header->count - in_count;
      if (old_latch_count.nonzero_p () && new_latch_count.nonzero_p ())
	scale_dominated_blocks_in_loop (loop, new_exit->src, new_latch_count,
					old_latch_count);

      /* Set the probability of the exit edge based on NEW_EST_NITER
	 (which estimates latch counts rather than iteration counts).
	 Update the probabilities of other edges to match.

	 If the profile counts are large enough to give the required
	 precision, the updates above will have made

	   e->dest->count / e->src->count ~= new e->probability

	 for every outgoing edge e of NEW_EXIT->src.  */
      profile_probability new_exit_prob = profile_probability::always ()
					    .apply_scale (1, new_est_niter + 1);
      change_edge_frequency (new_exit, new_exit_prob);
    }

  checking_verify_flow_info ();
  checking_verify_loop_structure ();
  checking_verify_loop_closed_ssa (true, loop);
  checking_verify_loop_closed_ssa (true, new_loop);
}

/* Wrapper over tree_transform_and_unroll_loop for the case when we do not
   want to transform the loop before unrolling.  The meaning of the
   arguments is the same as for tree_transform_and_unroll_loop.  */

void
tree_unroll_loop (class loop *loop, unsigned factor,
		  class tree_niter_desc *desc)
{
  tree_transform_and_unroll_loop (loop, factor, desc, NULL, NULL);
}

/* Rewrite the phi node at position PSI in terms of the main
   induction variable MAIN_IV and insert the generated code at GSI.  */
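
/* I.e. a PHI whose result RES is a simple affine IV <BASE, STEP> in LOOP
   is removed and replaced by the computation

     res = BASE + STEP * MAIN_IV;

   (a POINTER_PLUS_EXPR with a sizetype offset when RES is a pointer).  */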

static void
rewrite_phi_with_iv (loop_p loop,
		     gphi_iterator *psi,
		     gimple_stmt_iterator *gsi,
		     tree main_iv)
{
  affine_iv iv;
  gassign *stmt;
  gphi *phi = psi->phi ();
  tree atype, mtype, val, res = PHI_RESULT (phi);

  if (virtual_operand_p (res) || res == main_iv)
    {
      gsi_next (psi);
      return;
    }

  if (!simple_iv (loop, loop, res, &iv, true))
    {
      gsi_next (psi);
      return;
    }

  remove_phi_node (psi, false);

  atype = TREE_TYPE (res);
  mtype = POINTER_TYPE_P (atype) ? sizetype : atype;
  val = fold_build2 (MULT_EXPR, mtype, unshare_expr (iv.step),
		     fold_convert (mtype, main_iv));
  val = fold_build2 (POINTER_TYPE_P (atype)
		     ? POINTER_PLUS_EXPR : PLUS_EXPR,
		     atype, unshare_expr (iv.base), val);
  val = force_gimple_operand_gsi (gsi, val, false, NULL_TREE, true,
				  GSI_SAME_STMT);
  stmt = gimple_build_assign (res, val);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
}

/* Rewrite all the phi nodes of LOOP in terms of the main induction
   variable MAIN_IV.  */

static void
rewrite_all_phi_nodes_with_iv (loop_p loop, tree main_iv)
{
  unsigned i;
  basic_block *bbs = get_loop_body_in_dom_order (loop);
  gphi_iterator psi;

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator gsi = gsi_after_labels (bb);

      if (bb->loop_father != loop)
	continue;

      for (psi = gsi_start_phis (bb); !gsi_end_p (psi); )
	rewrite_phi_with_iv (loop, &psi, &gsi, main_iv);
    }

  free (bbs);
}

/* Bases all the induction variables in LOOP on a single induction variable
   (with base 0 and step 1), whose final value is compared with *NIT.  When
   the IV type precision has to be larger than *NIT type precision, *NIT is
   converted to the larger type, the conversion code is inserted before the
   loop, and *NIT is updated to the new definition.  When BUMP_IN_LATCH is
   true, the induction variable is incremented in the loop latch, otherwise
   it is incremented in the loop header.  Return the induction variable that
   was created.  */
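
/* For example (a sketch; names are illustrative), after this transformation
   the single exit test of LOOP compares the canonical IV against *NIT:

     ivtmp_1 = PHI <0(preheader), ivtmp_2(latch)>
     ...
     if (ivtmp_1 < nit_3)
       goto <continue looping>;
     else
       goto <exit>;

   and all other PHIs of LOOP have been rewritten in terms of the IV by
   rewrite_all_phi_nodes_with_iv above.  */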

tree
canonicalize_loop_ivs (class loop *loop, tree *nit, bool bump_in_latch)
{
  unsigned precision = TYPE_PRECISION (TREE_TYPE (*nit));
  unsigned original_precision = precision;
  tree type, var_before;
  gimple_stmt_iterator gsi;
  gphi_iterator psi;
  gcond *stmt;
  edge exit = single_dom_exit (loop);
  gimple_seq stmts;
  bool unsigned_p = false;

  for (psi = gsi_start_phis (loop->header);
       !gsi_end_p (psi); gsi_next (&psi))
    {
      gphi *phi = psi.phi ();
      tree res = PHI_RESULT (phi);
      bool uns;

      type = TREE_TYPE (res);
      if (virtual_operand_p (res)
	  || (!INTEGRAL_TYPE_P (type)
	      && !POINTER_TYPE_P (type))
	  || TYPE_PRECISION (type) < precision)
	continue;

      uns = POINTER_TYPE_P (type) | TYPE_UNSIGNED (type);

      if (TYPE_PRECISION (type) > precision)
	unsigned_p = uns;
      else
	unsigned_p |= uns;

      precision = TYPE_PRECISION (type);
    }

  scalar_int_mode mode = smallest_int_mode_for_size (precision);
  precision = GET_MODE_PRECISION (mode);
  type = build_nonstandard_integer_type (precision, unsigned_p);

  if (original_precision != precision
      || TYPE_UNSIGNED (TREE_TYPE (*nit)) != unsigned_p)
    {
      *nit = fold_convert (type, *nit);
      *nit = force_gimple_operand (*nit, &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
    }

  if (bump_in_latch)
    gsi = gsi_last_bb (loop->latch);
  else
    gsi = gsi_last_nondebug_bb (loop->header);
  create_iv (build_int_cst_type (type, 0), build_int_cst (type, 1), NULL_TREE,
	     loop, &gsi, bump_in_latch, &var_before, NULL);

  rewrite_all_phi_nodes_with_iv (loop, var_before);

  stmt = as_a <gcond *> (last_stmt (exit->src));
  /* Make the loop exit if the control condition is not satisfied.  */
  if (exit->flags & EDGE_TRUE_VALUE)
    {
      edge te, fe;

      extract_true_false_edges_from_block (exit->src, &te, &fe);
      te->flags = EDGE_FALSE_VALUE;
      fe->flags = EDGE_TRUE_VALUE;
    }
  gimple_cond_set_code (stmt, LT_EXPR);
  gimple_cond_set_lhs (stmt, var_before);
  gimple_cond_set_rhs (stmt, *nit);
  update_stmt (stmt);

  return var_before;
}