/* Induction variable canonicalization and loop peeling.
   Copyright (C) 2004-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This pass detects loops that iterate a constant number of times,
   adds a canonical induction variable (step -1, tested against 0)
   and replaces the exit test.  This enables the less powerful RTL
   level analysis to use this information.

   This might spoil the code in some cases (by increasing register
   pressure).  Note that in case the new variable is not needed, ivopts
   will get rid of it, so it might be a problem only when there are no
   other linear induction variables.  In that case the created
   optimization opportunities are likely to pay off.

   We also perform
     - complete unrolling (or peeling) when the loop rolls few enough
       times
     - simple peeling (i.e. copying a few initial iterations before the
       loop) when the estimated number of iterations is known (typically
       from profile info).  */
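
/* As an illustrative sketch (not literal pass output), a loop such as

     for (i = 0; i != 4; i++)
       body (i);

   is given a new counter that starts at the number of iterations and
   steps by -1, and the exit test is rewritten to compare that counter
   against zero:

     ivtmp = 4;
     do
       {
         body (i);
         i++;
         ivtmp--;
       }
     while (ivtmp != 0);

   so the later, less powerful analyses only need to understand the
   canonical counting-down form.  */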

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "profile.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "params.h"
#include "tree-inline.h"
#include "tree-cfgcleanup.h"
#include "builtins.h"

/* Specifies the types of loops that may be unrolled.  */

enum unroll_level
{
  UL_SINGLE_ITER,   /* Only loops that exit immediately in the first
                       iteration.  */
  UL_NO_GROWTH,     /* Only loops whose unrolling will not cause an
                       increase in code size.  */
  UL_ALL            /* All suitable loops.  */
};

/* Adds a canonical induction variable to LOOP iterating NITER times.  EXIT
   is the exit edge whose condition is replaced.  The SSA versions of the new
   IV before and after increment will be stored in VAR_BEFORE and VAR_AFTER
   if they are not NULL.  */

void
create_canonical_iv (struct loop *loop, edge exit, tree niter,
                     tree *var_before = NULL, tree *var_after = NULL)
{
  edge in;
  tree type, var;
  gcond *cond;
  gimple_stmt_iterator incr_at;
  enum tree_code cmp;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Added canonical iv to loop %d, ", loop->num);
      print_generic_expr (dump_file, niter, TDF_SLIM);
      fprintf (dump_file, " iterations.\n");
    }

  cond = as_a <gcond *> (last_stmt (exit->src));
  in = EDGE_SUCC (exit->src, 0);
  if (in == exit)
    in = EDGE_SUCC (exit->src, 1);

  /* Note that we do not need to worry about overflows, since
     the type of niter is always unsigned and all comparisons are
     just for equality/nonequality -- i.e. everything works
     with modulo arithmetic.  */

  type = TREE_TYPE (niter);
  niter = fold_build2 (PLUS_EXPR, type,
                       niter,
                       build_int_cst (type, 1));
  incr_at = gsi_last_bb (in->src);
  create_iv (niter,
             build_int_cst (type, -1),
             NULL_TREE, loop,
             &incr_at, false, var_before, &var);
  if (var_after)
    *var_after = var;

  cmp = (exit->flags & EDGE_TRUE_VALUE) ? EQ_EXPR : NE_EXPR;
  gimple_cond_set_code (cond, cmp);
  gimple_cond_set_lhs (cond, var);
  gimple_cond_set_rhs (cond, build_int_cst (type, 0));
  update_stmt (cond);
}
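
/* A quick sanity check of the arithmetic above, assuming NITER counts
   executions of the latch as the callers do: with NITER == 3 the IV is
   created with base 4 and step -1, the decrement lands just before the
   exit test, and the test fires when the decremented value reaches 0.
   The values seen by the test are 3, 2, 1, 0, so the body runs four
   times and the latch three, as required.  Because the test is a plain
   equality, this stays correct even when NITER + 1 wraps around in its
   unsigned type.  */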

/* Describe size of loop as detected by tree_estimate_loop_size.  */
struct loop_size
{
  /* Number of instructions in the loop.  */
  int overall;

  /* Number of instructions that will likely be optimized out in
     peeled iterations of the loop (i.e. computations based on an
     induction variable that starts at a known constant).  */
  int eliminated_by_peeling;

  /* Same statistics for the last iteration of the loop: it is smaller
     because instructions after the exit are not executed.  */
  int last_iteration;
  int last_iteration_eliminated_by_peeling;

  /* If some IV computation will become constant.  */
  bool constant_iv;

  /* Number of call stmts that are not builtins and are pure or const,
     present on the hot path.  */
  int num_pure_calls_on_hot_path;
  /* Number of call stmts that are not builtins and are not pure nor const,
     present on the hot path.  */
  int num_non_pure_calls_on_hot_path;
  /* Number of statements other than calls on the hot path.  */
  int non_call_stmts_on_hot_path;
  /* Number of branches seen on the hot path.  */
  int num_branches_on_hot_path;
};

/* Return true if OP in STMT will be constant after peeling LOOP.  */

static bool
constant_after_peeling (tree op, gimple *stmt, struct loop *loop)
{
  if (is_gimple_min_invariant (op))
    return true;

  /* We can still fold accesses to constant arrays when the index is known.  */
  if (TREE_CODE (op) != SSA_NAME)
    {
      tree base = op;

      /* First take a quick look to see if there is a constant array inside.  */
      while (handled_component_p (base))
        base = TREE_OPERAND (base, 0);
      if ((DECL_P (base)
           && ctor_for_folding (base) != error_mark_node)
          || CONSTANT_CLASS_P (base))
        {
          /* If so, see if we understand all the indices.  */
          base = op;
          while (handled_component_p (base))
            {
              if (TREE_CODE (base) == ARRAY_REF
                  && !constant_after_peeling (TREE_OPERAND (base, 1), stmt, loop))
                return false;
              base = TREE_OPERAND (base, 0);
            }
          return true;
        }
      return false;
    }

  /* Induction variables are constants when defined in the loop.  */
  if (loop_containing_stmt (stmt) != loop)
    return false;
  tree ev = analyze_scalar_evolution (loop, op);
  if (chrec_contains_undetermined (ev)
      || chrec_contains_symbols (ev))
    return false;
  return true;
}
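
/* A sketch of the situation this predicate looks for: in

     static const int t[4] = { 1, 2, 4, 8 };
     for (i = 0; i < 4; i++)
       sum += t[i];

   the index i is an induction variable with a constant start, so in
   each peeled copy the access t[i] has a known index into an object
   with a usable constructor and can be expected to fold to a
   constant.  */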

/* Computes an estimated number of insns in LOOP.
   EXIT (if non-NULL) is an exit edge that will be eliminated in all but
   the last iteration of the loop.
   EDGE_TO_CANCEL (if non-NULL) is a non-exit edge eliminated in the last
   iteration of the loop.
   Return results in SIZE, estimating the benefits of complete unrolling
   exiting by EXIT.
   Stop estimating after UPPER_BOUND is met.  Return true in this case.  */

static bool
tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel,
                         struct loop_size *size, int upper_bound)
{
  basic_block *body = get_loop_body (loop);
  gimple_stmt_iterator gsi;
  unsigned int i;
  bool after_exit;
  vec<basic_block> path = get_loop_hot_path (loop);

  size->overall = 0;
  size->eliminated_by_peeling = 0;
  size->last_iteration = 0;
  size->last_iteration_eliminated_by_peeling = 0;
  size->num_pure_calls_on_hot_path = 0;
  size->num_non_pure_calls_on_hot_path = 0;
  size->non_call_stmts_on_hot_path = 0;
  size->num_branches_on_hot_path = 0;
  size->constant_iv = 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num);
  for (i = 0; i < loop->num_nodes; i++)
    {
      if (edge_to_cancel && body[i] != edge_to_cancel->src
          && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src))
        after_exit = true;
      else
        after_exit = false;
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index,
                 after_exit);

      for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);
          int num = estimate_num_insns (stmt, &eni_size_weights);
          bool likely_eliminated = false;
          bool likely_eliminated_last = false;
          bool likely_eliminated_peeled = false;

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, " size: %3i ", num);
              print_gimple_stmt (dump_file, gsi_stmt (gsi), 0);
            }

          /* Look for reasons why we might optimize this stmt away.  */

          if (!gimple_has_side_effects (stmt))
            {
              /* Exit conditional.  */
              if (exit && body[i] == exit->src
                  && stmt == last_stmt (exit->src))
                {
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, " Exit condition will be eliminated "
                             "in peeled copies.\n");
                  likely_eliminated_peeled = true;
                }
              if (edge_to_cancel && body[i] == edge_to_cancel->src
                  && stmt == last_stmt (edge_to_cancel->src))
                {
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, " Exit condition will be eliminated "
                             "in last copy.\n");
                  likely_eliminated_last = true;
                }
              /* Sets of IV variables.  */
              if (gimple_code (stmt) == GIMPLE_ASSIGN
                  && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop))
                {
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, " Induction variable computation will"
                             " be folded away.\n");
                  likely_eliminated = true;
                }
              /* Assignments of IV variables.  */
              else if (gimple_code (stmt) == GIMPLE_ASSIGN
                       && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
                       && constant_after_peeling (gimple_assign_rhs1 (stmt),
                                                  stmt, loop)
                       && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS
                           || constant_after_peeling (gimple_assign_rhs2 (stmt),
                                                      stmt, loop)))
                {
                  size->constant_iv = true;
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file,
                             " Constant expression will be folded away.\n");
                  likely_eliminated = true;
                }
              /* Conditionals.  */
              else if ((gimple_code (stmt) == GIMPLE_COND
                        && constant_after_peeling (gimple_cond_lhs (stmt), stmt,
                                                   loop)
                        && constant_after_peeling (gimple_cond_rhs (stmt), stmt,
                                                   loop)
                        /* We don't simplify all constant compares so make sure
                           they are not both constant already.  See PR70288.  */
                        && (! is_gimple_min_invariant (gimple_cond_lhs (stmt))
                            || ! is_gimple_min_invariant
                                   (gimple_cond_rhs (stmt))))
                       || (gimple_code (stmt) == GIMPLE_SWITCH
                           && constant_after_peeling (gimple_switch_index (
                                                        as_a <gswitch *>
                                                          (stmt)),
                                                      stmt, loop)
                           && ! is_gimple_min_invariant
                                  (gimple_switch_index
                                     (as_a <gswitch *> (stmt)))))
                {
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, " Constant conditional.\n");
                  likely_eliminated = true;
                }
            }

          size->overall += num;
          if (likely_eliminated || likely_eliminated_peeled)
            size->eliminated_by_peeling += num;
          if (!after_exit)
            {
              size->last_iteration += num;
              if (likely_eliminated || likely_eliminated_last)
                size->last_iteration_eliminated_by_peeling += num;
            }
          if ((size->overall * 3 / 2 - size->eliminated_by_peeling
               - size->last_iteration_eliminated_by_peeling) > upper_bound)
            {
              free (body);
              path.release ();
              return true;
            }
        }
    }
  while (path.length ())
    {
      basic_block bb = path.pop ();
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);
          if (gimple_code (stmt) == GIMPLE_CALL
              && !gimple_inexpensive_call_p (as_a <gcall *> (stmt)))
            {
              int flags = gimple_call_flags (stmt);
              if (flags & (ECF_PURE | ECF_CONST))
                size->num_pure_calls_on_hot_path++;
              else
                size->num_non_pure_calls_on_hot_path++;
              size->num_branches_on_hot_path++;
            }
          /* Count inexpensive calls as non-calls, because they will likely
             expand inline.  */
          else if (gimple_code (stmt) != GIMPLE_DEBUG)
            size->non_call_stmts_on_hot_path++;
          if (((gimple_code (stmt) == GIMPLE_COND
                && (!constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
                    || !constant_after_peeling (gimple_cond_rhs (stmt), stmt,
                                                loop)))
               || (gimple_code (stmt) == GIMPLE_SWITCH
                   && !constant_after_peeling (gimple_switch_index (
                                                 as_a <gswitch *> (stmt)),
                                               stmt, loop)))
              && (!exit || bb != exit->src))
            size->num_branches_on_hot_path++;
        }
    }
  path.release ();
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall,
             size->eliminated_by_peeling, size->last_iteration,
             size->last_iteration_eliminated_by_peeling);

  free (body);
  return false;
}

/* Estimate the number of insns of the completely unrolled loop.
   It is (NUNROLL + 1) * size of the loop body, taking into account
   the fact that in the last copy everything after the exit conditional
   is dead and that some instructions will be eliminated after
   peeling.

   The loop body is likely going to simplify further; this is difficult
   to guess, so we just decrease the result by 1/3.  */

static unsigned HOST_WIDE_INT
estimated_unrolled_size (struct loop_size *size,
                         unsigned HOST_WIDE_INT nunroll)
{
  HOST_WIDE_INT unr_insns = ((nunroll)
                             * (HOST_WIDE_INT) (size->overall
                                                - size->eliminated_by_peeling));
  if (!nunroll)
    unr_insns = 0;
  unr_insns += size->last_iteration - size->last_iteration_eliminated_by_peeling;

  unr_insns = unr_insns * 2 / 3;
  if (unr_insns <= 0)
    unr_insns = 1;

  return unr_insns;
}
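
/* A quick worked instance of the estimate above: with SIZE->overall == 10,
   SIZE->eliminated_by_peeling == 4, SIZE->last_iteration == 8,
   SIZE->last_iteration_eliminated_by_peeling == 5 and NUNROLL == 3, we
   get (3 * (10 - 4) + (8 - 5)) * 2 / 3 == 14 insns for the unrolled
   sequence.  */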

/* Loop LOOP is known to not loop.  See if there is an edge in the loop
   body that can be removed to make the loop always exit, without at the
   same time making dead any code potentially executed during the last
   iteration.

   After complete unrolling we still may get rid of the conditional
   on the exit in the last copy even if we have no idea what it does.
   This is quite a common case for loops of the form

     int a[5];
     for (i = 0; i < b; i++)
       a[i] = 0;

   Here we prove the loop to iterate at most 5 times, but we do not know
   it from the induction variable.

   For now we handle only the simple case where there is an exit condition
   just before the latch block and the latch block contains no statements
   with side effects that may otherwise terminate the execution of the loop
   (such as by EH or by terminating the program or longjmp).

   In the general case we may want to cancel the paths leading to statements
   loop-niter identified as having undefined effect in the last iteration.
   The other cases are hopefully rare and will be cleaned up later.  */

static edge
loop_edge_to_cancel (struct loop *loop)
{
  vec<edge> exits;
  unsigned i;
  edge edge_to_cancel;
  gimple_stmt_iterator gsi;

  /* We want only one predecessor of the loop latch.  */
  if (EDGE_COUNT (loop->latch->preds) > 1)
    return NULL;

  exits = get_loop_exit_edges (loop);

  FOR_EACH_VEC_ELT (exits, i, edge_to_cancel)
    {
      /* Find the edge other than the loop exit
         leaving the conditional.  */
      if (EDGE_COUNT (edge_to_cancel->src->succs) != 2)
        continue;
      if (EDGE_SUCC (edge_to_cancel->src, 0) == edge_to_cancel)
        edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 1);
      else
        edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 0);

      /* We can handle only conditionals.  */
      if (!(edge_to_cancel->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
        continue;

      /* We should never have conditionals in the loop latch.  */
      gcc_assert (edge_to_cancel->dest != loop->header);

      /* Check that it leads to the loop latch.  */
      if (edge_to_cancel->dest != loop->latch)
        continue;

      exits.release ();

      /* Verify that the code in the loop latch does nothing that may end
         program execution without really reaching the exit.  This may
         include non-pure/const function calls, EH statements, volatile
         ASMs, etc.  */
      for (gsi = gsi_start_bb (loop->latch); !gsi_end_p (gsi); gsi_next (&gsi))
        if (gimple_has_side_effects (gsi_stmt (gsi)))
          return NULL;
      return edge_to_cancel;
    }
  exits.release ();
  return NULL;
}

/* Remove all tests for exits that are known to be taken after LOOP was
   peeled NPEELED times.  Put __builtin_unreachable () before every
   statement known to not be executed.  */

static bool
remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* If the statement is known to be undefined after peeling, turn it
         into unreachable (or trap when debugging experience is supposed
         to be good).  */
      if (!elt->is_exit
          && wi::ltu_p (elt->bound, npeeled))
        {
          gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt);
          gcall *stmt = gimple_build_call
              (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
          gimple_set_location (stmt, gimple_location (elt->stmt));
          gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
          split_block (gimple_bb (stmt), stmt);
          changed = true;
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Forced statement unreachable: ");
              print_gimple_stmt (dump_file, elt->stmt, 0);
            }
        }
      /* If we know the exit will be taken after peeling, update.  */
      else if (elt->is_exit
               && wi::leu_p (elt->bound, npeeled))
        {
          basic_block bb = gimple_bb (elt->stmt);
          edge exit_edge = EDGE_SUCC (bb, 0);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Forced exit to be taken: ");
              print_gimple_stmt (dump_file, elt->stmt, 0);
            }
          if (!loop_exit_edge_p (loop, exit_edge))
            exit_edge = EDGE_SUCC (bb, 1);
          exit_edge->probability = profile_probability::always ();
          gcc_checking_assert (loop_exit_edge_p (loop, exit_edge));
          gcond *cond_stmt = as_a <gcond *> (elt->stmt);
          if (exit_edge->flags & EDGE_TRUE_VALUE)
            gimple_cond_make_true (cond_stmt);
          else
            gimple_cond_make_false (cond_stmt);
          update_stmt (cond_stmt);
          changed = true;
        }
    }
  return changed;
}
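
/* An illustrative sketch of the effect: after peeling

     for (i = 0; ; i++)
       {
         if (i >= 4)
           break;
         a[i] = i;
       }

   four times, the copy of "if (i >= 4)" left in the loop body is known
   to be taken (its recorded bound does not exceed the number of peeled
   iterations), so it is folded to an always-taken exit, and statements
   that can no longer execute get a __builtin_unreachable () call placed
   before them.  */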

/* Remove all exits that are known to be never taken because of the loop
   bound discovered.  */

static bool
remove_redundant_iv_tests (struct loop *loop)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  if (!loop->any_upper_bound)
    return false;
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* The exit is pointless if it won't be taken before the loop
         reaches its upper bound.  */
      if (elt->is_exit && loop->any_upper_bound
          && wi::ltu_p (loop->nb_iterations_upper_bound, elt->bound))
        {
          basic_block bb = gimple_bb (elt->stmt);
          edge exit_edge = EDGE_SUCC (bb, 0);
          struct tree_niter_desc niter;

          if (!loop_exit_edge_p (loop, exit_edge))
            exit_edge = EDGE_SUCC (bb, 1);

          /* Only when we know the actual number of iterations, not
             just a bound, can we remove the exit.  */
          if (!number_of_iterations_exit (loop, exit_edge,
                                          &niter, false, false)
              || !integer_onep (niter.assumptions)
              || !integer_zerop (niter.may_be_zero)
              || !niter.niter
              || TREE_CODE (niter.niter) != INTEGER_CST
              || !wi::ltu_p (loop->nb_iterations_upper_bound,
                             wi::to_widest (niter.niter)))
            continue;

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Removed pointless exit: ");
              print_gimple_stmt (dump_file, elt->stmt, 0);
            }
          gcond *cond_stmt = as_a <gcond *> (elt->stmt);
          if (exit_edge->flags & EDGE_TRUE_VALUE)
            gimple_cond_make_false (cond_stmt);
          else
            gimple_cond_make_true (cond_stmt);
          update_stmt (cond_stmt);
          changed = true;
        }
    }
  return changed;
}
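
/* For instance (a hedged sketch), in

     for (i = 0; i < n && i < 100; i++)
       a[i] = 0;

   where the discovered upper bound on the loop is 10 iterations, the
   "i < 100" test is redundant: it would only fire after 100 iterations,
   which the bound proves can never be reached, so that exit condition
   can be folded to always stay in the loop.  */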

/* Stores loops that will be unlooped and edges that will be removed
   after we process the whole loop tree.  */
static vec<loop_p> loops_to_unloop;
static vec<int> loops_to_unloop_nunroll;
static vec<edge> edges_to_remove;
/* Stores loops that have been peeled.  */
static bitmap peeled_loops;

/* Cancel all fully unrolled loops by putting __builtin_unreachable
   on the latch edge.
   We do it after all unrolling since unlooping moves basic blocks
   across loop boundaries, trashing loop closed SSA form as well
   as SCEV info, both of which need to be intact during unrolling.

   IRRED_INVALIDATED is used to bookkeep if information about
   irreducible regions may become invalid as a result
   of the transformation.
   LOOP_CLOSED_SSA_INVALIDATED is used to bookkeep the case
   when we need to go into loop closed SSA form.  */

static void
unloop_loops (bitmap loop_closed_ssa_invalidated,
              bool *irred_invalidated)
{
  while (loops_to_unloop.length ())
    {
      struct loop *loop = loops_to_unloop.pop ();
      int n_unroll = loops_to_unloop_nunroll.pop ();
      basic_block latch = loop->latch;
      edge latch_edge = loop_latch_edge (loop);
      int flags = latch_edge->flags;
      location_t locus = latch_edge->goto_locus;
      gcall *stmt;
      gimple_stmt_iterator gsi;

      remove_exits_and_undefined_stmts (loop, n_unroll);

      /* Unloop destroys the latch edge.  */
      unloop (loop, irred_invalidated, loop_closed_ssa_invalidated);

      /* Create new basic block for the latch edge destination and wire
         it in.  */
      stmt = gimple_build_call (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
      latch_edge = make_edge (latch, create_basic_block (NULL, NULL, latch), flags);
      latch_edge->probability = profile_probability::never ();
      latch_edge->flags |= flags;
      latch_edge->goto_locus = locus;

      add_bb_to_loop (latch_edge->dest, current_loops->tree_root);
      latch_edge->dest->count = profile_count::zero ();
      set_immediate_dominator (CDI_DOMINATORS, latch_edge->dest, latch_edge->src);

      gsi = gsi_start_bb (latch_edge->dest);
      gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
    }
  loops_to_unloop.release ();
  loops_to_unloop_nunroll.release ();

  /* Remove edges in peeled copies.  Since remove_path removes dominated
     regions, we need to cope with the removal of already removed paths.  */
  unsigned i;
  edge e;
  auto_vec<int, 20> src_bbs;
  src_bbs.reserve_exact (edges_to_remove.length ());
  FOR_EACH_VEC_ELT (edges_to_remove, i, e)
    src_bbs.quick_push (e->src->index);
  FOR_EACH_VEC_ELT (edges_to_remove, i, e)
    if (BASIC_BLOCK_FOR_FN (cfun, src_bbs[i]))
      {
        bool ok = remove_path (e, irred_invalidated,
                               loop_closed_ssa_invalidated);
        gcc_assert (ok);
      }
  edges_to_remove.release ();
}

/* Tries to unroll LOOP completely, i.e. NITER times.
   UL determines which loops we are allowed to unroll.
   EXIT is the exit of the loop that should be eliminated.
   MAXITER specifies a bound on the number of iterations, -1 if it is
   not known or too large for HOST_WIDE_INT.  The location
   LOCUS corresponding to the loop is used when emitting
   a summary of the unroll to the dump file.  */

static bool
try_unroll_loop_completely (struct loop *loop,
                            edge exit, tree niter, bool may_be_zero,
                            enum unroll_level ul,
                            HOST_WIDE_INT maxiter,
                            location_t locus, bool allow_peel)
{
  unsigned HOST_WIDE_INT n_unroll = 0;
  bool n_unroll_found = false;
  edge edge_to_cancel = NULL;

  /* See if we proved the number of iterations to be a low constant.

     EXIT is an edge that will be removed in all but the last iteration of
     the loop.

     EDGE_TO_CANCEL is an edge that will be removed from the last iteration
     of the unrolled sequence and is expected to make the final loop not
     rolling.

     If the number of executions of the loop is determined by a standard
     induction variable test, then EXIT and EDGE_TO_CANCEL are the two
     edges leaving the IV test.  */
  if (tree_fits_uhwi_p (niter))
    {
      n_unroll = tree_to_uhwi (niter);
      n_unroll_found = true;
      edge_to_cancel = EDGE_SUCC (exit->src, 0);
      if (edge_to_cancel == exit)
        edge_to_cancel = EDGE_SUCC (exit->src, 1);
    }
  /* We do not know the number of iterations and thus we cannot eliminate
     the EXIT edge.  */
  else
    exit = NULL;

  /* See if we can improve our estimate by using recorded loop bounds.  */
  if ((allow_peel || maxiter == 0 || ul == UL_NO_GROWTH)
      && maxiter >= 0
      && (!n_unroll_found || (unsigned HOST_WIDE_INT)maxiter < n_unroll))
    {
      n_unroll = maxiter;
      n_unroll_found = true;
      /* The loop terminates before the IV variable test, so we cannot
         remove it in the last iteration.  */
      edge_to_cancel = NULL;
    }

  if (!n_unroll_found)
    return false;

  if (!loop->unroll
      && n_unroll > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Not unrolling loop %d "
                 "(--param max-completely-peel-times limit reached).\n",
                 loop->num);
      return false;
    }

  if (!edge_to_cancel)
    edge_to_cancel = loop_edge_to_cancel (loop);

  if (n_unroll)
    {
      if (ul == UL_SINGLE_ITER)
        return false;

      if (loop->unroll)
        {
          /* If the unrolling factor is too large, bail out.  */
          if (n_unroll > (unsigned)loop->unroll)
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file,
                         "Not unrolling loop %d: "
                         "user didn't want it unrolled completely.\n",
                         loop->num);
              return false;
            }
        }
      else
        {
          struct loop_size size;
          /* EXIT can be removed only if we are sure it passes the first
             N_UNROLL iterations.  */
          bool remove_exit = (exit && niter
                              && TREE_CODE (niter) == INTEGER_CST
                              && wi::leu_p (n_unroll, wi::to_widest (niter)));
          bool large
            = tree_estimate_loop_size
                (loop, remove_exit ? exit : NULL, edge_to_cancel, &size,
                 PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS));
          if (large)
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "Not unrolling loop %d: it is too large.\n",
                         loop->num);
              return false;
            }

          unsigned HOST_WIDE_INT ninsns = size.overall;
          unsigned HOST_WIDE_INT unr_insns
            = estimated_unrolled_size (&size, n_unroll);
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, " Loop size: %d\n", (int) ninsns);
              fprintf (dump_file, " Estimated size after unrolling: %d\n",
                       (int) unr_insns);
            }

          /* If the code is going to shrink, we don't need to be extra
             cautious on guessing if the unrolling is going to be
             profitable.  */
          if (unr_insns
              /* If there is an IV variable that will become constant, we
                 save one instruction in the loop prologue we do not
                 account otherwise.  */
              <= ninsns + (size.constant_iv != false))
            ;
          /* We unroll only inner loops, because we do not consider it
             profitable otherwise.  We can still cancel the loopback edge
             of a loop that does not roll; this is always a good idea.  */
          else if (ul == UL_NO_GROWTH)
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "Not unrolling loop %d: size would grow.\n",
                         loop->num);
              return false;
            }
          /* Outer loops tend to be less interesting candidates for
             complete unrolling unless we can do a lot of propagation
             into the inner loop body.  For now we disable outer loop
             unrolling when the code would grow.  */
          else if (loop->inner)
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "Not unrolling loop %d: "
                         "it is not innermost and code would grow.\n",
                         loop->num);
              return false;
            }
          /* If there is a call on the hot path through the loop, then
             there is most probably not much to optimize.  */
          else if (size.num_non_pure_calls_on_hot_path)
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "Not unrolling loop %d: "
                         "contains call and code would grow.\n",
                         loop->num);
              return false;
            }
          /* If there is a pure/const call on the hot path, then we can
             still optimize the unrolled loop body if it contains some
             other interesting code than the calls and the code storing or
             accumulating the return value.  */
          else if (size.num_pure_calls_on_hot_path
                   /* One IV increment, one test, one ivtmp store and
                      one useful stmt.  That is about the minimal loop
                      doing a pure call.  */
                   && (size.non_call_stmts_on_hot_path
                       <= 3 + size.num_pure_calls_on_hot_path))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "Not unrolling loop %d: "
                         "contains just pure calls and code would grow.\n",
                         loop->num);
              return false;
            }
          /* Complete unrolling is a major win when control flow is
             removed and one big basic block is created.  If the loop
             contains control flow the optimization may still be a win
             because of eliminating the loop overhead, but it also may
             blow the branch predictor tables.  Limit the number of
             branches on the hot path through the peeled sequence.  */
          else if (size.num_branches_on_hot_path * (int)n_unroll
                   > PARAM_VALUE (PARAM_MAX_PEEL_BRANCHES))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "Not unrolling loop %d: "
                         "number of branches on hot path in the unrolled "
                         "sequence reaches --param max-peel-branches limit.\n",
                         loop->num);
              return false;
            }
          else if (unr_insns
                   > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "Not unrolling loop %d: "
                         "number of insns in the unrolled sequence reaches "
                         "--param max-completely-peeled-insns limit.\n",
                         loop->num);
              return false;
            }
        }

      initialize_original_copy_tables ();
      auto_sbitmap wont_exit (n_unroll + 1);
      if (exit && niter
          && TREE_CODE (niter) == INTEGER_CST
          && wi::leu_p (n_unroll, wi::to_widest (niter)))
        {
          bitmap_ones (wont_exit);
          if (wi::eq_p (wi::to_widest (niter), n_unroll)
              || edge_to_cancel)
            bitmap_clear_bit (wont_exit, 0);
        }
      else
        {
          exit = NULL;
          bitmap_clear (wont_exit);
        }
      if (may_be_zero)
        bitmap_clear_bit (wont_exit, 1);

      if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
                                                 n_unroll, wont_exit,
                                                 exit, &edges_to_remove,
                                                 DLTHE_FLAG_UPDATE_FREQ
                                                 | DLTHE_FLAG_COMPLETTE_PEEL))
        {
          free_original_copy_tables ();
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Failed to duplicate the loop\n");
          return false;
        }

      free_original_copy_tables ();
    }

  /* Remove the conditional from the last copy of the loop.  */
  if (edge_to_cancel)
    {
      gcond *cond = as_a <gcond *> (last_stmt (edge_to_cancel->src));
      force_edge_cold (edge_to_cancel, true);
      if (edge_to_cancel->flags & EDGE_TRUE_VALUE)
        gimple_cond_make_false (cond);
      else
        gimple_cond_make_true (cond);
      update_stmt (cond);
      /* Do not remove the path, as doing so may remove the outer loop and
         confuse bookkeeping code in tree_unroll_loops_completely.  */
    }

  /* Store the loop for later unlooping and exit removal.  */
  loops_to_unloop.safe_push (loop);
  loops_to_unloop_nunroll.safe_push (n_unroll);

  if (dump_enabled_p ())
    {
      if (!n_unroll)
        dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
                         "loop turned into non-loop; it never loops\n");
      else
        {
          dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
                           "loop with %d iterations completely unrolled",
                           (int) n_unroll);
          if (loop->header->count.initialized_p ())
            dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS,
                         " (header execution count %d)",
                         (int)loop->header->count.to_gcov_type ());
          dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, "\n");
        }
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (exit)
        fprintf (dump_file, "Exit condition of peeled iterations was "
                 "eliminated.\n");
      if (edge_to_cancel)
        fprintf (dump_file, "Last iteration exit edge was proved true.\n");
      else
        fprintf (dump_file, "Latch of last iteration was marked by "
                 "__builtin_unreachable ().\n");
    }

  return true;
}

/* Return the number of instructions after peeling.  */
static unsigned HOST_WIDE_INT
estimated_peeled_sequence_size (struct loop_size *size,
                                unsigned HOST_WIDE_INT npeel)
{
  return MAX (npeel * (HOST_WIDE_INT) (size->overall
                                       - size->eliminated_by_peeling), 1);
}
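
/* A quick worked instance: peeling NPEEL == 4 copies of a body with
   SIZE->overall == 10 and SIZE->eliminated_by_peeling == 4 is estimated
   at 4 * (10 - 4) == 24 insns; the MAX with 1 only guards the degenerate
   case where everything is expected to fold away.  */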

/* If the loop is expected to iterate N times and is
   small enough, duplicate the loop body N+1 times before
   the loop itself.  This way the hot path will never
   enter the loop.
   Parameters are the same as for try_unroll_loop_completely.  */

static bool
try_peel_loop (struct loop *loop,
               edge exit, tree niter, bool may_be_zero,
               HOST_WIDE_INT maxiter)
{
  HOST_WIDE_INT npeel;
  struct loop_size size;
  int peeled_size;

  if (!flag_peel_loops
      || PARAM_VALUE (PARAM_MAX_PEEL_TIMES) <= 0
      || !peeled_loops)
    return false;

  if (bitmap_bit_p (peeled_loops, loop->num))
    {
      if (dump_file)
        fprintf (dump_file, "Not peeling: loop is already peeled\n");
      return false;
    }

  /* We don't peel loops that will be unrolled as this can duplicate a
     loop more times than the user requested.  */
  if (loop->unroll)
    {
      if (dump_file)
        fprintf (dump_file, "Not peeling: user didn't want it peeled.\n");
      return false;
    }

  /* Peel only innermost loops.
     While the code is perfectly capable of peeling non-innermost loops,
     the heuristics would probably need some improvements.  */
  if (loop->inner)
    {
      if (dump_file)
        fprintf (dump_file, "Not peeling: outer loop\n");
      return false;
    }

  if (!optimize_loop_for_speed_p (loop))
    {
      if (dump_file)
        fprintf (dump_file, "Not peeling: cold loop\n");
      return false;
    }

  /* Check if there is an estimate on the number of iterations.  */
  npeel = estimated_loop_iterations_int (loop);
  if (npeel < 0)
    npeel = likely_max_loop_iterations_int (loop);
  if (npeel < 0)
    {
      if (dump_file)
        fprintf (dump_file, "Not peeling: number of iterations is not "
                 "estimated\n");
      return false;
    }
  if (maxiter >= 0 && maxiter <= npeel)
    {
      if (dump_file)
        fprintf (dump_file, "Not peeling: upper bound is known so can "
                 "unroll completely\n");
      return false;
    }

  /* We want to peel the estimated number of iterations + 1 (so we never
     enter the loop on the quick path).  Check against PARAM_MAX_PEEL_TIMES
     and be sure to avoid overflows.  */
  if (npeel > PARAM_VALUE (PARAM_MAX_PEEL_TIMES) - 1)
    {
      if (dump_file)
        fprintf (dump_file, "Not peeling: rolls too much "
                 "(%i + 1 > --param max-peel-times)\n", (int) npeel);
      return false;
    }
  npeel++;

  /* Check the size of the peeled loops.  */
  tree_estimate_loop_size (loop, exit, NULL, &size,
                           PARAM_VALUE (PARAM_MAX_PEELED_INSNS));
  if ((peeled_size = estimated_peeled_sequence_size (&size, (int) npeel))
      > PARAM_VALUE (PARAM_MAX_PEELED_INSNS))
    {
      if (dump_file)
        fprintf (dump_file, "Not peeling: peeled sequence size is too large "
                 "(%i insns > --param max-peel-insns)", peeled_size);
      return false;
    }

  /* Duplicate the loop, possibly eliminating the exits.  */
  initialize_original_copy_tables ();
  auto_sbitmap wont_exit (npeel + 1);
  if (exit && niter
      && TREE_CODE (niter) == INTEGER_CST
      && wi::leu_p (npeel, wi::to_widest (niter)))
    {
      bitmap_ones (wont_exit);
      bitmap_clear_bit (wont_exit, 0);
    }
  else
    {
      exit = NULL;
      bitmap_clear (wont_exit);
    }
  if (may_be_zero)
    bitmap_clear_bit (wont_exit, 1);
  if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
                                             npeel, wont_exit,
                                             exit, &edges_to_remove,
                                             DLTHE_FLAG_UPDATE_FREQ))
    {
      free_original_copy_tables ();
      return false;
    }
  free_original_copy_tables ();
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Peeled loop %d, %i times.\n",
               loop->num, (int) npeel);
    }
  if (loop->any_estimate)
    {
      if (wi::ltu_p (npeel, loop->nb_iterations_estimate))
        loop->nb_iterations_estimate -= npeel;
      else
        loop->nb_iterations_estimate = 0;
    }
  if (loop->any_upper_bound)
    {
      if (wi::ltu_p (npeel, loop->nb_iterations_upper_bound))
        loop->nb_iterations_upper_bound -= npeel;
      else
        loop->nb_iterations_upper_bound = 0;
    }
  if (loop->any_likely_upper_bound)
    {
      if (wi::ltu_p (npeel, loop->nb_iterations_likely_upper_bound))
        loop->nb_iterations_likely_upper_bound -= npeel;
      else
        {
          loop->any_estimate = true;
          loop->nb_iterations_estimate = 0;
          loop->nb_iterations_likely_upper_bound = 0;
        }
    }

  /* Sum the counts of the remaining entry edges to rescale the loop
     profile.  */
  profile_count entry_count = profile_count::zero ();

  edge e;
  edge_iterator ei;
  FOR_EACH_EDGE (e, ei, loop->header->preds)
    if (e->src != loop->latch)
      {
        if (e->src->count.initialized_p ())
          entry_count += e->src->count;
        gcc_assert (!flow_bb_inside_loop_p (loop, e->src));
      }
  profile_probability p = entry_count.probability_in (loop->header->count);
  scale_loop_profile (loop, p, 0);
  bitmap_set_bit (peeled_loops, loop->num);
  return true;
}
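
/* An illustrative sketch of the result: if profile feedback says a loop

     while (q)
       q = q->next;

   almost always iterates once, peeling with npeel == 2 copies the body
   twice in front of the loop, so the hot path handles short lists
   without entering the loop at all, and the remaining loop is rescaled
   to the cold entry count computed above.  */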

/* Adds a canonical induction variable to LOOP if suitable.
   CREATE_IV is true if we may create a new iv.  UL determines
   which loops we are allowed to completely unroll.  If TRY_EVAL is true, we
   try to determine the number of iterations of a loop by direct evaluation.
   Returns true if the cfg is changed.  */

static bool
canonicalize_loop_induction_variables (struct loop *loop,
                                       bool create_iv, enum unroll_level ul,
                                       bool try_eval, bool allow_peel)
{
  edge exit = NULL;
  tree niter;
  HOST_WIDE_INT maxiter;
  bool modified = false;
  location_t locus = UNKNOWN_LOCATION;
  struct tree_niter_desc niter_desc;
  bool may_be_zero = false;

  /* For unrolling allow conditional constant or zero iterations, thus
     perform loop-header copying on-the-fly.  */
  exit = single_exit (loop);
  niter = chrec_dont_know;
  if (exit && number_of_iterations_exit (loop, exit, &niter_desc, false))
    {
      niter = niter_desc.niter;
      may_be_zero
        = niter_desc.may_be_zero && !integer_zerop (niter_desc.may_be_zero);
    }
  if (TREE_CODE (niter) == INTEGER_CST)
    locus = gimple_location_safe (last_stmt (exit->src));
  else
    {
      /* For non-constant niter fold may_be_zero into niter again.  */
      if (may_be_zero)
        {
          if (COMPARISON_CLASS_P (niter_desc.may_be_zero))
            niter = fold_build3 (COND_EXPR, TREE_TYPE (niter),
                                 niter_desc.may_be_zero,
                                 build_int_cst (TREE_TYPE (niter), 0), niter);
          else
            niter = chrec_dont_know;
          may_be_zero = false;
        }

      /* If the loop has more than one exit, try checking all of them
         for # of iterations determinable through scev.  */
      if (!exit)
        niter = find_loop_niter (loop, &exit);

      /* Finally if everything else fails, try brute force evaluation.  */
      if (try_eval
          && (chrec_contains_undetermined (niter)
              || TREE_CODE (niter) != INTEGER_CST))
        niter = find_loop_niter_by_eval (loop, &exit);

      if (exit)
        locus = gimple_location_safe (last_stmt (exit->src));

      if (TREE_CODE (niter) != INTEGER_CST)
        exit = NULL;
    }

  /* We work exceptionally hard here to estimate the bound
     by find_loop_niter_by_eval.  Be sure to keep it for the future.  */
  if (niter && TREE_CODE (niter) == INTEGER_CST)
    {
      record_niter_bound (loop, wi::to_widest (niter),
                          exit == single_likely_exit (loop), true);
    }

  /* Force re-computation of loop bounds so we can remove redundant exits.  */
  maxiter = max_loop_iterations_int (loop);

  if (dump_file && (dump_flags & TDF_DETAILS)
      && TREE_CODE (niter) == INTEGER_CST)
    {
      fprintf (dump_file, "Loop %d iterates ", loop->num);
      print_generic_expr (dump_file, niter, TDF_SLIM);
      fprintf (dump_file, " times.\n");
    }
  if (dump_file && (dump_flags & TDF_DETAILS)
      && maxiter >= 0)
    {
      fprintf (dump_file, "Loop %d iterates at most %i times.\n", loop->num,
               (int)maxiter);
    }
  if (dump_file && (dump_flags & TDF_DETAILS)
      && likely_max_loop_iterations_int (loop) >= 0)
    {
      fprintf (dump_file, "Loop %d likely iterates at most %i times.\n",
               loop->num, (int)likely_max_loop_iterations_int (loop));
    }

  /* Remove exits that are known to be never taken based on the loop bound.
     Needs to be called after computation of max_loop_iterations_int, which
     populates the loop bounds.  */
  modified |= remove_redundant_iv_tests (loop);

  if (try_unroll_loop_completely (loop, exit, niter, may_be_zero, ul,
                                  maxiter, locus, allow_peel))
    return true;

  if (create_iv
      && niter && !chrec_contains_undetermined (niter)
      && exit && just_once_each_iteration_p (loop, exit->src))
    {
      tree iv_niter = niter;
      if (may_be_zero)
        {
          if (COMPARISON_CLASS_P (niter_desc.may_be_zero))
            iv_niter = fold_build3 (COND_EXPR, TREE_TYPE (iv_niter),
                                    niter_desc.may_be_zero,
                                    build_int_cst (TREE_TYPE (iv_niter), 0),
                                    iv_niter);
          else
            iv_niter = NULL_TREE;
        }
      if (iv_niter)
        create_canonical_iv (loop, exit, iv_niter);
    }

  if (ul == UL_ALL)
    modified |= try_peel_loop (loop, exit, niter, may_be_zero, maxiter);

  return modified;
}

/* The main entry point of the pass.  Adds canonical induction variables
   to the suitable loops.  */

unsigned int
canonicalize_induction_variables (void)
{
  struct loop *loop;
  bool changed = false;
  bool irred_invalidated = false;
  bitmap loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);

  estimate_numbers_of_iterations (cfun);

  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      changed |= canonicalize_loop_induction_variables (loop,
                                                        true, UL_SINGLE_ITER,
                                                        true, false);
    }
  gcc_assert (!need_ssa_update_p (cfun));

  unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);
  if (irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  /* Clean up the information about numbers of iterations, since brute force
     evaluation could reveal new information.  */
  free_numbers_of_iterations_estimates (cfun);
  scev_reset ();

  if (!bitmap_empty_p (loop_closed_ssa_invalidated))
    {
      gcc_checking_assert (loops_state_satisfies_p (LOOP_CLOSED_SSA));
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
    }
  BITMAP_FREE (loop_closed_ssa_invalidated);

  if (changed)
    return TODO_cleanup_cfg;
  return 0;
}

/* Propagate constant SSA_NAMEs defined in basic block BB.  */

static void
propagate_constants_for_unrolling (basic_block bb)
{
  /* Look for degenerate PHI nodes with constant argument.  */
  for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
    {
      gphi *phi = gsi.phi ();
      tree result = gimple_phi_result (phi);
      tree arg = gimple_phi_arg_def (phi, 0);

      if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (result)
          && gimple_phi_num_args (phi) == 1
          && CONSTANT_CLASS_P (arg))
        {
          replace_uses_by (result, arg);
          gsi_remove (&gsi, true);
          release_ssa_name (result);
        }
      else
        gsi_next (&gsi);
    }

  /* Look for assignments to SSA names with constant RHS.  */
  for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      gimple *stmt = gsi_stmt (gsi);
      tree lhs;

      if (is_gimple_assign (stmt)
          && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_constant
          && (lhs = gimple_assign_lhs (stmt), TREE_CODE (lhs) == SSA_NAME)
          && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
        {
          replace_uses_by (lhs, gimple_assign_rhs1 (stmt));
          gsi_remove (&gsi, true);
          release_ssa_name (lhs);
        }
      else
        gsi_next (&gsi);
    }
}

/* Process loops from innermost to outer, stopping at the innermost
   loop we unrolled.  */

static bool
tree_unroll_loops_completely_1 (bool may_increase_size, bool unroll_outer,
                                bitmap father_bbs, struct loop *loop)
{
  struct loop *loop_father;
  bool changed = false;
  struct loop *inner;
  enum unroll_level ul;
  unsigned num = number_of_loops (cfun);

  /* Process inner loops first.  Don't walk loops added by the recursive
     calls because SSA form is not up-to-date.  They can be handled in the
     next iteration.  */
  for (inner = loop->inner; inner != NULL; inner = inner->next)
    if ((unsigned) inner->num < num)
      changed |= tree_unroll_loops_completely_1 (may_increase_size,
                                                 unroll_outer, father_bbs,
                                                 inner);

  /* If we changed an inner loop we cannot process outer loops in this
     iteration because SSA form is not up-to-date.  Continue with
     siblings of outer loops instead.  */
  if (changed)
    return true;

  /* Don't unroll #pragma omp simd loops until the vectorizer
     attempts to vectorize those.  */
  if (loop->force_vectorize)
    return false;

  /* Try to unroll this loop.  */
  loop_father = loop_outer (loop);
  if (!loop_father)
    return false;

  if (loop->unroll > 1)
    ul = UL_ALL;
  else if (may_increase_size && optimize_loop_nest_for_speed_p (loop)
           /* Unroll outermost loops only if asked to do so or they do
              not cause code growth.  */
           && (unroll_outer || loop_outer (loop_father)))
    ul = UL_ALL;
  else
    ul = UL_NO_GROWTH;

  if (canonicalize_loop_induction_variables
        (loop, false, ul, !flag_tree_loop_ivcanon, unroll_outer))
    {
      /* If we'll continue unrolling, we need to propagate constants
         within the new basic blocks to fold away induction variable
         computations; otherwise, the size might blow up before the
         iteration completes and the IR is eventually cleaned up.  */
      if (loop_outer (loop_father))
        bitmap_set_bit (father_bbs, loop_father->header->index);

      return true;
    }

  return false;
}

/* Unroll LOOPS completely if they iterate just a few times.  Unless
   MAY_INCREASE_SIZE is true, perform the unrolling only if the
   size of the code does not increase.  */

static unsigned int
tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
{
  bitmap father_bbs = BITMAP_ALLOC (NULL);
  bool changed;
  int iteration = 0;
  bool irred_invalidated = false;

  estimate_numbers_of_iterations (cfun);

  do
    {
      changed = false;
      bitmap loop_closed_ssa_invalidated = NULL;

      if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
        loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);

      free_numbers_of_iterations_estimates (cfun);
      estimate_numbers_of_iterations (cfun);

      changed = tree_unroll_loops_completely_1 (may_increase_size,
                                                unroll_outer, father_bbs,
                                                current_loops->tree_root);
      if (changed)
        {
          unsigned i;

          unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);

          /* We cannot use TODO_update_ssa_no_phi because VOPS gets
             confused.  */
          if (loop_closed_ssa_invalidated
              && !bitmap_empty_p (loop_closed_ssa_invalidated))
            rewrite_into_loop_closed_ssa (loop_closed_ssa_invalidated,
                                          TODO_update_ssa);
          else
            update_ssa (TODO_update_ssa);

          /* father_bbs is a bitmap of loop father header BB indices.
             Translate that to what non-root loops these BBs belong to now.  */
          bitmap_iterator bi;
          bitmap fathers = BITMAP_ALLOC (NULL);
          EXECUTE_IF_SET_IN_BITMAP (father_bbs, 0, i, bi)
            {
              basic_block unrolled_loop_bb = BASIC_BLOCK_FOR_FN (cfun, i);
              if (! unrolled_loop_bb)
                continue;
              if (loop_outer (unrolled_loop_bb->loop_father))
                bitmap_set_bit (fathers,
                                unrolled_loop_bb->loop_father->num);
            }
          bitmap_clear (father_bbs);
          /* Propagate the constants within the new basic blocks.  */
          EXECUTE_IF_SET_IN_BITMAP (fathers, 0, i, bi)
            {
              loop_p father = get_loop (cfun, i);
              basic_block *body = get_loop_body_in_dom_order (father);
              for (unsigned j = 0; j < father->num_nodes; j++)
                propagate_constants_for_unrolling (body[j]);
              free (body);
            }
          BITMAP_FREE (fathers);

          /* This will take care of removing completely unrolled loops
             from the loop structures so we can continue unrolling the
             now-innermost loops.  */
          if (cleanup_tree_cfg ())
            update_ssa (TODO_update_ssa_only_virtuals);

          /* Clean up the information about numbers of iterations, since
             complete unrolling might have invalidated it.  */
          scev_reset ();
          if (flag_checking && loops_state_satisfies_p (LOOP_CLOSED_SSA))
            verify_loop_closed_ssa (true);
        }
      if (loop_closed_ssa_invalidated)
        BITMAP_FREE (loop_closed_ssa_invalidated);
    }
  while (changed
         && ++iteration <= PARAM_VALUE (PARAM_MAX_UNROLL_ITERATIONS));

  BITMAP_FREE (father_bbs);

  if (irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  return 0;
}

/* Canonical induction variable creation pass.  */

namespace {

const pass_data pass_data_iv_canon =
{
  GIMPLE_PASS, /* type */
  "ivcanon", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_TREE_LOOP_IVCANON, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_iv_canon : public gimple_opt_pass
{
public:
  pass_iv_canon (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_iv_canon, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_loop_ivcanon != 0; }
  virtual unsigned int execute (function *fun);

}; // class pass_iv_canon

unsigned int
pass_iv_canon::execute (function *fun)
{
  if (number_of_loops (fun) <= 1)
    return 0;

  return canonicalize_induction_variables ();
}

} // anon namespace

gimple_opt_pass *
make_pass_iv_canon (gcc::context *ctxt)
{
  return new pass_iv_canon (ctxt);
}

/* Complete unrolling of loops.  */

namespace {

const pass_data pass_data_complete_unroll =
{
  GIMPLE_PASS, /* type */
  "cunroll", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_COMPLETE_UNROLL, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_complete_unroll : public gimple_opt_pass
{
public:
  pass_complete_unroll (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_complete_unroll, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_complete_unroll

unsigned int
pass_complete_unroll::execute (function *fun)
{
  if (number_of_loops (fun) <= 1)
    return 0;

  /* If we ever decide to run loop peeling more than once, we will need to
     track loops already peeled in loop structures themselves to avoid
     re-peeling the same loop multiple times.  */
  if (flag_peel_loops)
    peeled_loops = BITMAP_ALLOC (NULL);
  unsigned int val = tree_unroll_loops_completely (flag_unroll_loops
                                                   || flag_peel_loops
                                                   || optimize >= 3, true);
  if (peeled_loops)
    {
      BITMAP_FREE (peeled_loops);
      peeled_loops = NULL;
    }
  return val;
}

} // anon namespace

gimple_opt_pass *
make_pass_complete_unroll (gcc::context *ctxt)
{
  return new pass_complete_unroll (ctxt);
}

/* Complete unrolling of inner loops.  */

namespace {

const pass_data pass_data_complete_unrolli =
{
  GIMPLE_PASS, /* type */
  "cunrolli", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_COMPLETE_UNROLL, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_complete_unrolli : public gimple_opt_pass
{
public:
  pass_complete_unrolli (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_complete_unrolli, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize >= 2; }
  virtual unsigned int execute (function *);

}; // class pass_complete_unrolli

unsigned int
pass_complete_unrolli::execute (function *fun)
{
  unsigned ret = 0;

  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  if (number_of_loops (fun) > 1)
    {
      scev_initialize ();
      ret = tree_unroll_loops_completely (optimize >= 3, false);
      scev_finalize ();
    }
  loop_optimizer_finalize ();

  return ret;
}

} // anon namespace

gimple_opt_pass *
make_pass_complete_unrolli (gcc::context *ctxt)
{
  return new pass_complete_unrolli (ctxt);
}