1 /* Loop manipulation code for GNU compiler.
2 Copyright (C) 2002-2018 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "cfghooks.h"
28 #include "cfganal.h"
29 #include "cfgloop.h"
30 #include "gimple-iterator.h"
31 #include "gimplify-me.h"
32 #include "tree-ssa-loop-manip.h"
33 #include "dumpfile.h"
34
35 static void copy_loops_to (struct loop **, int,
36 struct loop *);
37 static void loop_redirect_edge (edge, basic_block);
38 static void remove_bbs (basic_block *, int);
39 static bool rpe_enum_p (const_basic_block, const void *);
40 static int find_path (edge, basic_block **);
41 static void fix_loop_placements (struct loop *, bool *);
42 static bool fix_bb_placement (basic_block);
43 static void fix_bb_placements (basic_block, bool *, bitmap);
44
45 /* Checks whether basic block BB is dominated by DATA. */
46 static bool
rpe_enum_p(const_basic_block bb,const void * data)47 rpe_enum_p (const_basic_block bb, const void *data)
48 {
49 return dominated_by_p (CDI_DOMINATORS, bb, (const_basic_block) data);
50 }
51
52 /* Remove basic blocks BBS. NBBS is the number of the basic blocks. */
53
54 static void
remove_bbs(basic_block * bbs,int nbbs)55 remove_bbs (basic_block *bbs, int nbbs)
56 {
57 int i;
58
59 for (i = 0; i < nbbs; i++)
60 delete_basic_block (bbs[i]);
61 }
62
63 /* Find path -- i.e. the basic blocks dominated by edge E and put them
64 into array BBS, that will be allocated large enough to contain them.
65 E->dest must have exactly one predecessor for this to work (it is
66 easy to achieve and we do not put it here because we do not want to
67 alter anything by this function). The number of basic blocks in the
68 path is returned. */
69 static int
find_path(edge e,basic_block ** bbs)70 find_path (edge e, basic_block **bbs)
71 {
72 gcc_assert (EDGE_COUNT (e->dest->preds) <= 1);
73
74 /* Find bbs in the path. */
75 *bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
76 return dfs_enumerate_from (e->dest, 0, rpe_enum_p, *bbs,
77 n_basic_blocks_for_fn (cfun), e->dest);
78 }
79
80 /* Fix placement of basic block BB inside loop hierarchy --
81 Let L be a loop to that BB belongs. Then every successor of BB must either
82 1) belong to some superloop of loop L, or
83 2) be a header of loop K such that K->outer is superloop of L
84 Returns true if we had to move BB into other loop to enforce this condition,
85 false if the placement of BB was already correct (provided that placements
86 of its successors are correct). */
87 static bool
fix_bb_placement(basic_block bb)88 fix_bb_placement (basic_block bb)
89 {
90 edge e;
91 edge_iterator ei;
92 struct loop *loop = current_loops->tree_root, *act;
93
94 FOR_EACH_EDGE (e, ei, bb->succs)
95 {
96 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
97 continue;
98
99 act = e->dest->loop_father;
100 if (act->header == e->dest)
101 act = loop_outer (act);
102
103 if (flow_loop_nested_p (loop, act))
104 loop = act;
105 }
106
107 if (loop == bb->loop_father)
108 return false;
109
110 remove_bb_from_loops (bb);
111 add_bb_to_loop (bb, loop);
112
113 return true;
114 }
115
116 /* Fix placement of LOOP inside loop tree, i.e. find the innermost superloop
117 of LOOP to that leads at least one exit edge of LOOP, and set it
118 as the immediate superloop of LOOP. Return true if the immediate superloop
119 of LOOP changed.
120
121 IRRED_INVALIDATED is set to true if a change in the loop structures might
122 invalidate the information about irreducible regions. */
123
124 static bool
fix_loop_placement(struct loop * loop,bool * irred_invalidated)125 fix_loop_placement (struct loop *loop, bool *irred_invalidated)
126 {
127 unsigned i;
128 edge e;
129 vec<edge> exits = get_loop_exit_edges (loop);
130 struct loop *father = current_loops->tree_root, *act;
131 bool ret = false;
132
133 FOR_EACH_VEC_ELT (exits, i, e)
134 {
135 act = find_common_loop (loop, e->dest->loop_father);
136 if (flow_loop_nested_p (father, act))
137 father = act;
138 }
139
140 if (father != loop_outer (loop))
141 {
142 for (act = loop_outer (loop); act != father; act = loop_outer (act))
143 act->num_nodes -= loop->num_nodes;
144 flow_loop_tree_node_remove (loop);
145 flow_loop_tree_node_add (father, loop);
146
147 /* The exit edges of LOOP no longer exits its original immediate
148 superloops; remove them from the appropriate exit lists. */
149 FOR_EACH_VEC_ELT (exits, i, e)
150 {
151 /* We may need to recompute irreducible loops. */
152 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
153 *irred_invalidated = true;
154 rescan_loop_exit (e, false, false);
155 }
156
157 ret = true;
158 }
159
160 exits.release ();
161 return ret;
162 }
163
/* Fix placements of basic blocks inside loop hierarchy stored in loops; i.e.
   enforce condition stated in description of fix_bb_placement.  We
   start from basic block FROM that had some of its successors removed, so that
   its placement no longer has to be correct, and iteratively fix placement of
   its predecessors that may change if placement of FROM changed.  Also fix
   placement of subloops of FROM->loop_father, that might also be altered due
   to this change; the condition for them is similar, except that instead of
   successors we consider edges coming out of the loops.

   If the changes may invalidate the information about irreducible regions,
   IRRED_INVALIDATED is set to true.

   If LOOP_CLOSED_SSA_INVALIDATED is non-zero then all basic blocks with
   changed loop_father are collected there.  */

static void
fix_bb_placements (basic_block from,
		   bool *irred_invalidated,
		   bitmap loop_closed_ssa_invalidated)
{
  basic_block *queue, *qtop, *qbeg, *qend;
  struct loop *base_loop, *target_loop;
  edge e;

  /* We pass through blocks back-reachable from FROM, testing whether some
     of their successors moved to outer loop.  It may be necessary to
     iterate several times, but it is finite, as we stop unless we move
     the basic block up the loop structure.  The whole story is a bit
     more complicated due to presence of subloops, those are moved using
     fix_loop_placement.  */

  base_loop = from->loop_father;
  /* If we are already in the outermost loop, the basic blocks cannot be moved
     outside of it.  If FROM is the header of the base loop, it cannot be moved
     outside of it, either.  In both cases, we can end now.  */
  if (base_loop == current_loops->tree_root
      || from == base_loop->header)
    return;

  auto_sbitmap in_queue (last_basic_block_for_fn (cfun));
  bitmap_clear (in_queue);
  bitmap_set_bit (in_queue, from->index);
  /* Prevent us from going out of the base_loop.  */
  bitmap_set_bit (in_queue, base_loop->header->index);

  /* Circular work queue; num_nodes + 1 slots suffice because at most the
     blocks of BASE_LOOP (minus its header) can be pending at once.  */
  queue = XNEWVEC (basic_block, base_loop->num_nodes + 1);
  qtop = queue + base_loop->num_nodes + 1;
  qbeg = queue;
  qend = queue + 1;
  *qbeg = from;

  while (qbeg != qend)
    {
      edge_iterator ei;
      /* Pop the next block off the circular queue, wrapping at QTOP.  */
      from = *qbeg;
      qbeg++;
      if (qbeg == qtop)
	qbeg = queue;
      bitmap_clear_bit (in_queue, from->index);

      if (from->loop_father->header == from)
	{
	  /* Subloop header, maybe move the loop upward.  */
	  if (!fix_loop_placement (from->loop_father, irred_invalidated))
	    continue;
	  target_loop = loop_outer (from->loop_father);
	  if (loop_closed_ssa_invalidated)
	    {
	      /* Moving a whole subloop changed the loop_father of every
		 block it contains; record them all.  */
	      basic_block *bbs = get_loop_body (from->loop_father);
	      for (unsigned i = 0; i < from->loop_father->num_nodes; ++i)
		bitmap_set_bit (loop_closed_ssa_invalidated, bbs[i]->index);
	      free (bbs);
	    }
	}
      else
	{
	  /* Ordinary basic block.  */
	  if (!fix_bb_placement (from))
	    continue;
	  target_loop = from->loop_father;
	  if (loop_closed_ssa_invalidated)
	    bitmap_set_bit (loop_closed_ssa_invalidated, from->index);
	}

      FOR_EACH_EDGE (e, ei, from->succs)
	{
	  if (e->flags & EDGE_IRREDUCIBLE_LOOP)
	    *irred_invalidated = true;
	}

      /* Something has changed, insert predecessors into queue.  */
      FOR_EACH_EDGE (e, ei, from->preds)
	{
	  basic_block pred = e->src;
	  struct loop *nca;

	  if (e->flags & EDGE_IRREDUCIBLE_LOOP)
	    *irred_invalidated = true;

	  if (bitmap_bit_p (in_queue, pred->index))
	    continue;

	  /* If it is subloop, then it either was not moved, or
	     the path up the loop tree from base_loop do not contain
	     it.  */
	  nca = find_common_loop (pred->loop_father, base_loop);
	  if (pred->loop_father != base_loop
	      && (nca == base_loop
		  || nca != pred->loop_father))
	    pred = pred->loop_father->header;
	  else if (!flow_loop_nested_p (target_loop, pred->loop_father))
	    {
	      /* If PRED is already higher in the loop hierarchy than the
		 TARGET_LOOP to that we moved FROM, the change of the position
		 of FROM does not affect the position of PRED, so there is no
		 point in processing it.  */
	      continue;
	    }

	  /* Not redundant with the check above: PRED may have been replaced
	     by its loop header just now, so re-check scheduling.  */
	  if (bitmap_bit_p (in_queue, pred->index))
	    continue;

	  /* Schedule the basic block.  */
	  *qend = pred;
	  qend++;
	  if (qend == qtop)
	    qend = queue;
	  bitmap_set_bit (in_queue, pred->index);
	}
    }
  free (queue);
}
296
/* Removes path beginning at edge E, i.e. remove basic blocks dominated by E
   and update loop structures and dominators.  Return true if we were able
   to remove the path, false otherwise (and nothing is affected then).  */
bool
remove_path (edge e, bool *irred_invalidated,
	     bitmap loop_closed_ssa_invalidated)
{
  edge ae;
  basic_block *rem_bbs, *bord_bbs, from, bb;
  vec<basic_block> dom_bbs;
  int i, nrem, n_bord_bbs;
  bool local_irred_invalidated = false;
  edge_iterator ei;
  struct loop *l, *f;

  /* If the caller does not care about irreducibility, track it locally
     anyway: the flag decides whether to recompute the marks below.  */
  if (! irred_invalidated)
    irred_invalidated = &local_irred_invalidated;

  if (!can_remove_branch_p (e))
    return false;

  /* Keep track of whether we need to update information about irreducible
     regions.  This is the case if the removed area is a part of the
     irreducible region, or if the set of basic blocks that belong to a loop
     that is inside an irreducible region is changed, or if such a loop is
     removed.  */
  if (e->flags & EDGE_IRREDUCIBLE_LOOP)
    *irred_invalidated = true;

  /* We need to check whether basic blocks are dominated by the edge
     e, but we only have basic block dominators.  This is easy to
     fix -- when e->dest has exactly one predecessor, this corresponds
     to blocks dominated by e->dest, if not, split the edge.  */
  if (!single_pred_p (e->dest))
    e = single_pred_edge (split_edge (e));

  /* It may happen that by removing path we remove one or more loops
     we belong to.  In this case first unloop the loops, then proceed
     normally.  We may assume that e->dest is not a header of any loop,
     as it now has exactly one predecessor.  */
  for (l = e->src->loop_father; loop_outer (l); l = f)
    {
      f = loop_outer (l);
      if (dominated_by_p (CDI_DOMINATORS, l->latch, e->dest))
	unloop (l, irred_invalidated, loop_closed_ssa_invalidated);
    }

  /* Identify the path.  */
  nrem = find_path (e, &rem_bbs);

  n_bord_bbs = 0;
  bord_bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
  auto_sbitmap seen (last_basic_block_for_fn (cfun));
  bitmap_clear (seen);

  /* Find "border" bbs -- i.e. those with predecessor in removed path.  */
  for (i = 0; i < nrem; i++)
    bitmap_set_bit (seen, rem_bbs[i]->index);
  if (!*irred_invalidated)
    FOR_EACH_EDGE (ae, ei, e->src->succs)
      if (ae != e && ae->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
	  && !bitmap_bit_p (seen, ae->dest->index)
	  && ae->flags & EDGE_IRREDUCIBLE_LOOP)
	{
	  *irred_invalidated = true;
	  break;
	}

  for (i = 0; i < nrem; i++)
    {
      bb = rem_bbs[i];
      FOR_EACH_EDGE (ae, ei, rem_bbs[i]->succs)
	if (ae->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
	    && !bitmap_bit_p (seen, ae->dest->index))
	  {
	    bitmap_set_bit (seen, ae->dest->index);
	    bord_bbs[n_bord_bbs++] = ae->dest;

	    if (ae->flags & EDGE_IRREDUCIBLE_LOOP)
	      *irred_invalidated = true;
	  }
    }

  /* Remove the path.  */
  from = e->src;
  remove_branch (e);
  dom_bbs.create (0);

  /* Cancel loops contained in the path.  */
  for (i = 0; i < nrem; i++)
    if (rem_bbs[i]->loop_father->header == rem_bbs[i])
      cancel_loop_tree (rem_bbs[i]->loop_father);

  remove_bbs (rem_bbs, nrem);
  free (rem_bbs);

  /* Find blocks whose dominators may be affected.  */
  bitmap_clear (seen);
  for (i = 0; i < n_bord_bbs; i++)
    {
      basic_block ldom;

      bb = get_immediate_dominator (CDI_DOMINATORS, bord_bbs[i]);
      if (bitmap_bit_p (seen, bb->index))
	continue;
      bitmap_set_bit (seen, bb->index);

      /* Dominator sons of BB not dominated by FROM may have had their
	 immediate dominator changed by the removal; queue them.  */
      for (ldom = first_dom_son (CDI_DOMINATORS, bb);
	   ldom;
	   ldom = next_dom_son (CDI_DOMINATORS, ldom))
	if (!dominated_by_p (CDI_DOMINATORS, from, ldom))
	  dom_bbs.safe_push (ldom);
    }

  /* Recount dominators.  */
  iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, true);
  dom_bbs.release ();
  free (bord_bbs);

  /* Fix placements of basic blocks inside loops and the placement of
     loops in the loop tree.  */
  fix_bb_placements (from, irred_invalidated, loop_closed_ssa_invalidated);
  fix_loop_placements (from->loop_father, irred_invalidated);

  /* Only reached when the caller passed no flag of its own: refresh the
     irreducible-region marks here since nobody else will.  */
  if (local_irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  return true;
}
427
428 /* Creates place for a new LOOP in loops structure of FN. */
429
430 void
place_new_loop(struct function * fn,struct loop * loop)431 place_new_loop (struct function *fn, struct loop *loop)
432 {
433 loop->num = number_of_loops (fn);
434 vec_safe_push (loops_for_fn (fn)->larray, loop);
435 }
436
437 /* Given LOOP structure with filled header and latch, find the body of the
438 corresponding loop and add it to loops tree. Insert the LOOP as a son of
439 outer. */
440
441 void
add_loop(struct loop * loop,struct loop * outer)442 add_loop (struct loop *loop, struct loop *outer)
443 {
444 basic_block *bbs;
445 int i, n;
446 struct loop *subloop;
447 edge e;
448 edge_iterator ei;
449
450 /* Add it to loop structure. */
451 place_new_loop (cfun, loop);
452 flow_loop_tree_node_add (outer, loop);
453
454 /* Find its nodes. */
455 bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
456 n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
457
458 for (i = 0; i < n; i++)
459 {
460 if (bbs[i]->loop_father == outer)
461 {
462 remove_bb_from_loops (bbs[i]);
463 add_bb_to_loop (bbs[i], loop);
464 continue;
465 }
466
467 loop->num_nodes++;
468
469 /* If we find a direct subloop of OUTER, move it to LOOP. */
470 subloop = bbs[i]->loop_father;
471 if (loop_outer (subloop) == outer
472 && subloop->header == bbs[i])
473 {
474 flow_loop_tree_node_remove (subloop);
475 flow_loop_tree_node_add (loop, subloop);
476 }
477 }
478
479 /* Update the information about loop exit edges. */
480 for (i = 0; i < n; i++)
481 {
482 FOR_EACH_EDGE (e, ei, bbs[i]->succs)
483 {
484 rescan_loop_exit (e, false, false);
485 }
486 }
487
488 free (bbs);
489 }
490
491 /* Scale profile of loop by P. */
492
493 void
scale_loop_frequencies(struct loop * loop,profile_probability p)494 scale_loop_frequencies (struct loop *loop, profile_probability p)
495 {
496 basic_block *bbs;
497
498 bbs = get_loop_body (loop);
499 scale_bbs_frequencies (bbs, loop->num_nodes, p);
500 free (bbs);
501 }
502
503 /* Scale profile in LOOP by P.
504 If ITERATION_BOUND is non-zero, scale even further if loop is predicted
505 to iterate too many times.
506 Before caling this function, preheader block profile should be already
507 scaled to final count. This is necessary because loop iterations are
508 determined by comparing header edge count to latch ege count and thus
509 they need to be scaled synchronously. */
510
511 void
scale_loop_profile(struct loop * loop,profile_probability p,gcov_type iteration_bound)512 scale_loop_profile (struct loop *loop, profile_probability p,
513 gcov_type iteration_bound)
514 {
515 edge e, preheader_e;
516 edge_iterator ei;
517
518 if (dump_file && (dump_flags & TDF_DETAILS))
519 {
520 fprintf (dump_file, ";; Scaling loop %i with scale ",
521 loop->num);
522 p.dump (dump_file);
523 fprintf (dump_file, " bounding iterations to %i\n",
524 (int)iteration_bound);
525 }
526
527 /* Scale the probabilities. */
528 scale_loop_frequencies (loop, p);
529
530 if (iteration_bound == 0)
531 return;
532
533 gcov_type iterations = expected_loop_iterations_unbounded (loop, NULL, true);
534
535 if (dump_file && (dump_flags & TDF_DETAILS))
536 {
537 fprintf (dump_file, ";; guessed iterations after scaling %i\n",
538 (int)iterations);
539 }
540
541 /* See if loop is predicted to iterate too many times. */
542 if (iterations <= iteration_bound)
543 return;
544
545 preheader_e = loop_preheader_edge (loop);
546
547 /* We could handle also loops without preheaders, but bounding is
548 currently used only by optimizers that have preheaders constructed. */
549 gcc_checking_assert (preheader_e);
550 profile_count count_in = preheader_e->count ();
551
552 if (count_in > profile_count::zero ()
553 && loop->header->count.initialized_p ())
554 {
555 profile_count count_delta = profile_count::zero ();
556
557 e = single_exit (loop);
558 if (e)
559 {
560 edge other_e;
561 FOR_EACH_EDGE (other_e, ei, e->src->succs)
562 if (!(other_e->flags & (EDGE_ABNORMAL | EDGE_FAKE))
563 && e != other_e)
564 break;
565
566 /* Probability of exit must be 1/iterations. */
567 count_delta = e->count ();
568 e->probability = profile_probability::always ()
569 .apply_scale (1, iteration_bound);
570 other_e->probability = e->probability.invert ();
571
572 /* In code below we only handle the following two updates. */
573 if (other_e->dest != loop->header
574 && other_e->dest != loop->latch
575 && (dump_file && (dump_flags & TDF_DETAILS)))
576 {
577 fprintf (dump_file, ";; giving up on update of paths from "
578 "exit condition to latch\n");
579 }
580 }
581 else
582 if (dump_file && (dump_flags & TDF_DETAILS))
583 fprintf (dump_file, ";; Loop has multiple exit edges; "
584 "giving up on exit condition update\n");
585
586 /* Roughly speaking we want to reduce the loop body profile by the
587 difference of loop iterations. We however can do better if
588 we look at the actual profile, if it is available. */
589 p = profile_probability::always ();
590
591 count_in = count_in.apply_scale (iteration_bound, 1);
592 p = count_in.probability_in (loop->header->count);
593 if (!(p > profile_probability::never ()))
594 p = profile_probability::very_unlikely ();
595
596 if (p == profile_probability::always ()
597 || !p.initialized_p ())
598 return;
599
600 /* If latch exists, change its count, since we changed
601 probability of exit. Theoretically we should update everything from
602 source of exit edge to latch, but for vectorizer this is enough. */
603 if (loop->latch && loop->latch != e->src)
604 loop->latch->count += count_delta;
605
606 /* Scale the probabilities. */
607 scale_loop_frequencies (loop, p);
608
609 /* Change latch's count back. */
610 if (loop->latch && loop->latch != e->src)
611 loop->latch->count -= count_delta;
612
613 if (dump_file && (dump_flags & TDF_DETAILS))
614 fprintf (dump_file, ";; guessed iterations are now %i\n",
615 (int)expected_loop_iterations_unbounded (loop, NULL, true));
616 }
617 }
618
619 /* Recompute dominance information for basic blocks outside LOOP. */
620
621 static void
update_dominators_in_loop(struct loop * loop)622 update_dominators_in_loop (struct loop *loop)
623 {
624 vec<basic_block> dom_bbs = vNULL;
625 basic_block *body;
626 unsigned i;
627
628 auto_sbitmap seen (last_basic_block_for_fn (cfun));
629 bitmap_clear (seen);
630 body = get_loop_body (loop);
631
632 for (i = 0; i < loop->num_nodes; i++)
633 bitmap_set_bit (seen, body[i]->index);
634
635 for (i = 0; i < loop->num_nodes; i++)
636 {
637 basic_block ldom;
638
639 for (ldom = first_dom_son (CDI_DOMINATORS, body[i]);
640 ldom;
641 ldom = next_dom_son (CDI_DOMINATORS, ldom))
642 if (!bitmap_bit_p (seen, ldom->index))
643 {
644 bitmap_set_bit (seen, ldom->index);
645 dom_bbs.safe_push (ldom);
646 }
647 }
648
649 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
650 free (body);
651 dom_bbs.release ();
652 }
653
/* Creates an if region as shown above.  CONDITION is used to create
   the test for the if.

   |
   |     -------------                 -------------
   |     |  pred_bb  |                 |  pred_bb  |
   |     -------------                 -------------
   |           |                             |
   |           |                             | ENTRY_EDGE
   |           | ENTRY_EDGE                  V
   |           |             ====>     -------------
   |           |                       |  cond_bb  |
   |           |                       | CONDITION |
   |           |                       -------------
   |           V                        /         \
   |     -------------         e_false /           \ e_true
   |     |  succ_bb  |                V             V
   |     -------------         -----------       -----------
   |                           | false_bb |      | true_bb |
   |                           -----------       -----------
   |                                   \           /
   |                                    \         /
   |                                     V       V
   |                                   -------------
   |                                   |  join_bb  |
   |                                   -------------
   |                                         | exit_edge (result)
   |                                         V
   |                                    -----------
   |                                    | succ_bb |
   |                                    -----------
   |
 */

edge
create_empty_if_region_on_edge (edge entry_edge, tree condition)
{

  basic_block cond_bb, true_bb, false_bb, join_bb;
  edge e_true, e_false, exit_edge;
  gcond *cond_stmt;
  tree simple_cond;
  gimple_stmt_iterator gsi;

  /* The condition block is created by splitting the entry edge.  */
  cond_bb = split_edge (entry_edge);

  /* Insert condition in cond_bb.  */
  gsi = gsi_last_bb (cond_bb);
  /* Gimplify CONDITION first; the gimplification may emit statements,
     so re-fetch the iterator before inserting the GIMPLE_COND.  */
  simple_cond =
    force_gimple_operand_gsi (&gsi, condition, true, NULL,
			      false, GSI_NEW_STMT);
  cond_stmt = gimple_build_cond_from_tree (simple_cond, NULL_TREE, NULL_TREE);
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);

  /* Build the skeleton: join_bb, then the true and false arms, each by
     splitting an edge so the new blocks start out empty.  */
  join_bb = split_edge (single_succ_edge (cond_bb));

  e_true = single_succ_edge (cond_bb);
  true_bb = split_edge (e_true);

  e_false = make_edge (cond_bb, join_bb, 0);
  false_bb = split_edge (e_false);

  /* Re-tag the two outgoing edges of cond_bb as the branch edges.  */
  e_true->flags &= ~EDGE_FALLTHRU;
  e_true->flags |= EDGE_TRUE_VALUE;
  e_false->flags &= ~EDGE_FALLTHRU;
  e_false->flags |= EDGE_FALSE_VALUE;

  set_immediate_dominator (CDI_DOMINATORS, cond_bb, entry_edge->src);
  set_immediate_dominator (CDI_DOMINATORS, true_bb, cond_bb);
  set_immediate_dominator (CDI_DOMINATORS, false_bb, cond_bb);
  set_immediate_dominator (CDI_DOMINATORS, join_bb, cond_bb);

  exit_edge = single_succ_edge (join_bb);

  /* succ_bb may have other predecessors, in which case join_bb does not
     dominate it and its immediate dominator is unchanged.  */
  if (single_pred_p (exit_edge->dest))
    set_immediate_dominator (CDI_DOMINATORS, exit_edge->dest, join_bb);

  return exit_edge;
}
734
/* create_empty_loop_on_edge
   |
   |    - pred_bb -                   ------ pred_bb ------
   |   |           |                 | iv0 = initial_value |
   |    -----|-----                   ---------|-----------
   |         |                       ______    | entry_edge
   |         | entry_edge           /      |   |
   |         |             ====>   |      -V---V- loop_header -------------
   |         V                     |     | iv_before = phi (iv0, iv_after) |
   |    - succ_bb -                |      ---|-----------------------------
   |   |           |               |         |
   |    -----------                |      ---V--- loop_body ---------------
   |                               |     | iv_after = iv_before + stride   |
   |                               |     | if (iv_before < upper_bound)    |
   |                               |      ---|--------------\--------------
   |                               |         |               \ exit_e
   |                               |         V                \
   |                               |       - loop_latch -      V- succ_bb -
   |                               |      |              |     |           |
   |                               |       /-------------       -----------
   |                                \ ___ /

   Creates an empty loop as shown above, the IV_BEFORE is the SSA_NAME
   that is used before the increment of IV.  IV_BEFORE should be used for
   adding code to the body that uses the IV.  OUTER is the outer loop in
   which the new loop should be inserted.

   Both INITIAL_VALUE and UPPER_BOUND expressions are gimplified and
   inserted on the loop entry edge.  This implies that this function
   should be used only when the UPPER_BOUND expression is a loop
   invariant.  */

struct loop *
create_empty_loop_on_edge (edge entry_edge,
			   tree initial_value,
			   tree stride, tree upper_bound,
			   tree iv,
			   tree *iv_before,
			   tree *iv_after,
			   struct loop *outer)
{
  basic_block loop_header, loop_latch, succ_bb, pred_bb;
  struct loop *loop;
  gimple_stmt_iterator gsi;
  gimple_seq stmts;
  gcond *cond_expr;
  tree exit_test;
  edge exit_e;

  gcc_assert (entry_edge && initial_value && stride && upper_bound && iv);

  /* Create header, latch and wire up the loop.  */
  pred_bb = entry_edge->src;
  loop_header = split_edge (entry_edge);
  loop_latch = split_edge (single_succ_edge (loop_header));
  succ_bb = single_succ (loop_latch);
  /* Exit edge from header to succ_bb, and back edge from latch to header.  */
  make_edge (loop_header, succ_bb, 0);
  redirect_edge_succ_nodup (single_succ_edge (loop_latch), loop_header);

  /* Set immediate dominator information.  */
  set_immediate_dominator (CDI_DOMINATORS, loop_header, pred_bb);
  set_immediate_dominator (CDI_DOMINATORS, loop_latch, loop_header);
  set_immediate_dominator (CDI_DOMINATORS, succ_bb, loop_header);

  /* Initialize a loop structure and put it in a loop hierarchy.  */
  loop = alloc_loop ();
  loop->header = loop_header;
  loop->latch = loop_latch;
  add_loop (loop, outer);

  /* TODO: Fix counts.  */
  scale_loop_frequencies (loop, profile_probability::even ());

  /* Update dominators.  */
  update_dominators_in_loop (loop);

  /* Modify edge flags.  */
  exit_e = single_exit (loop);
  exit_e->flags = EDGE_LOOP_EXIT | EDGE_FALSE_VALUE;
  single_pred_edge (loop_latch)->flags = EDGE_TRUE_VALUE;

  /* Construct IV code in loop.  */
  initial_value = force_gimple_operand (initial_value, &stmts, true, iv);
  if (stmts)
    {
      /* Emit gimplification statements on the preheader edge so they
	 execute once, before the loop.  */
      gsi_insert_seq_on_edge (loop_preheader_edge (loop), stmts);
      gsi_commit_edge_inserts ();
    }

  upper_bound = force_gimple_operand (upper_bound, &stmts, true, NULL);
  if (stmts)
    {
      gsi_insert_seq_on_edge (loop_preheader_edge (loop), stmts);
      gsi_commit_edge_inserts ();
    }

  gsi = gsi_last_bb (loop_header);
  create_iv (initial_value, stride, iv, loop, &gsi, false,
	     iv_before, iv_after);

  /* Insert loop exit condition.  */
  cond_expr = gimple_build_cond
    (LT_EXPR, *iv_before, upper_bound, NULL_TREE, NULL_TREE);

  exit_test = gimple_cond_lhs (cond_expr);
  exit_test = force_gimple_operand_gsi (&gsi, exit_test, true, NULL,
					false, GSI_NEW_STMT);
  gimple_cond_set_lhs (cond_expr, exit_test);
  gsi = gsi_last_bb (exit_e->src);
  gsi_insert_after (&gsi, cond_expr, GSI_NEW_STMT);

  /* Keep labels separate from the loop body proper.  */
  split_block_after_labels (loop_header);

  return loop;
}
850
/* Make area between HEADER_EDGE and LATCH_EDGE a loop by connecting
   latch to header and update loop tree and dominators
   accordingly.  Everything between them plus LATCH_EDGE destination must
   be dominated by HEADER_EDGE destination, and back-reachable from
   LATCH_EDGE source.  HEADER_EDGE is redirected to basic block SWITCH_BB,
   FALSE_EDGE of SWITCH_BB to original destination of HEADER_EDGE and
   TRUE_EDGE of SWITCH_BB to original destination of LATCH_EDGE.
   Returns the newly created loop.  Frequencies and counts in the new loop
   are scaled by FALSE_SCALE and in the old one by TRUE_SCALE.  */

struct loop *
loopify (edge latch_edge, edge header_edge,
	 basic_block switch_bb, edge true_edge, edge false_edge,
	 bool redirect_all_edges, profile_probability true_scale,
	 profile_probability false_scale)
{
  basic_block succ_bb = latch_edge->dest;
  basic_block pred_bb = header_edge->src;
  struct loop *loop = alloc_loop ();
  struct loop *outer = loop_outer (succ_bb->loop_father);
  profile_count cnt;

  loop->header = header_edge->dest;
  loop->latch = latch_edge->src;

  /* Capture the header edge count before any redirection changes it.  */
  cnt = header_edge->count ();

  /* Redirect edges.  */
  loop_redirect_edge (latch_edge, loop->header);
  loop_redirect_edge (true_edge, succ_bb);

  /* During loop versioning, one of the switch_bb edge is already properly
     set.  Do not redirect it again unless redirect_all_edges is true.  */
  if (redirect_all_edges)
    {
      loop_redirect_edge (header_edge, switch_bb);
      loop_redirect_edge (false_edge, loop->header);

      /* Update dominators.  */
      set_immediate_dominator (CDI_DOMINATORS, switch_bb, pred_bb);
      set_immediate_dominator (CDI_DOMINATORS, loop->header, switch_bb);
    }

  set_immediate_dominator (CDI_DOMINATORS, succ_bb, switch_bb);

  /* Compute new loop.  */
  add_loop (loop, outer);

  /* Add switch_bb to appropriate loop.  */
  if (switch_bb->loop_father)
    remove_bb_from_loops (switch_bb);
  add_bb_to_loop (switch_bb, outer);

  /* Fix counts.  */
  if (redirect_all_edges)
    {
      switch_bb->count = cnt;
    }
  scale_loop_frequencies (loop, false_scale);
  scale_loop_frequencies (succ_bb->loop_father, true_scale);
  update_dominators_in_loop (loop);

  return loop;
}
915
/* Remove the latch edge of a LOOP and update loops to indicate that
   the LOOP was removed.  After this function, original loop latch will
   have no successor, which caller is expected to fix somehow.

   If this may cause the information about irreducible regions to become
   invalid, IRRED_INVALIDATED is set to true.

   LOOP_CLOSED_SSA_INVALIDATED, if non-NULL, is a bitmap where we store
   basic blocks that had non-trivial update on their loop_father.  */

void
unloop (struct loop *loop, bool *irred_invalidated,
	bitmap loop_closed_ssa_invalidated)
{
  basic_block *body;
  struct loop *ploop;
  unsigned i, n;
  basic_block latch = loop->latch;
  bool dummy = false;

  if (loop_preheader_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP)
    *irred_invalidated = true;

  /* This is relatively straightforward.  The dominators are unchanged, as
     loop header dominates loop latch, so the only thing we have to care of
     is the placement of loops and basic blocks inside the loop tree.  We
     move them all to the loop->outer, and then let fix_bb_placements do
     its work.  */

  body = get_loop_body (loop);
  n = loop->num_nodes;
  for (i = 0; i < n; i++)
    if (body[i]->loop_father == loop)
      {
	/* Only blocks directly in LOOP move; blocks of subloops keep
	   their loop_father and move with the subloop below.  */
	remove_bb_from_loops (body[i]);
	add_bb_to_loop (body[i], loop_outer (loop));
      }
  free (body);

  /* Re-parent all immediate subloops to LOOP's parent.  */
  while (loop->inner)
    {
      ploop = loop->inner;
      flow_loop_tree_node_remove (ploop);
      flow_loop_tree_node_add (loop_outer (loop), ploop);
    }

  /* Remove the loop and free its data.  */
  delete_loop (loop);

  /* Delete the back edge; the latch is left without a successor, which
     the caller must repair.  */
  remove_edge (single_succ_edge (latch));

  /* We do not pass IRRED_INVALIDATED to fix_bb_placements here, as even if
     there is an irreducible region inside the cancelled loop, the flags will
     be still correct.  */
  fix_bb_placements (latch, &dummy, loop_closed_ssa_invalidated);
}
972
973 /* Fix placement of superloops of LOOP inside loop tree, i.e. ensure that
974 condition stated in description of fix_loop_placement holds for them.
975 It is used in case when we removed some edges coming out of LOOP, which
976 may cause the right placement of LOOP inside loop tree to change.
977
978 IRRED_INVALIDATED is set to true if a change in the loop structures might
979 invalidate the information about irreducible regions. */
980
981 static void
fix_loop_placements(struct loop * loop,bool * irred_invalidated)982 fix_loop_placements (struct loop *loop, bool *irred_invalidated)
983 {
984 struct loop *outer;
985
986 while (loop_outer (loop))
987 {
988 outer = loop_outer (loop);
989 if (!fix_loop_placement (loop, irred_invalidated))
990 break;
991
992 /* Changing the placement of a loop in the loop tree may alter the
993 validity of condition 2) of the description of fix_bb_placement
994 for its preheader, because the successor is the header and belongs
995 to the loop. So call fix_bb_placements to fix up the placement
996 of the preheader and (possibly) of its predecessors. */
997 fix_bb_placements (loop_preheader_edge (loop)->src,
998 irred_invalidated, NULL);
999 loop = outer;
1000 }
1001 }
1002
1003 /* Duplicate loop bounds and other information we store about
1004 the loop into its duplicate. */
1005
1006 void
copy_loop_info(struct loop * loop,struct loop * target)1007 copy_loop_info (struct loop *loop, struct loop *target)
1008 {
1009 gcc_checking_assert (!target->any_upper_bound && !target->any_estimate);
1010 target->any_upper_bound = loop->any_upper_bound;
1011 target->nb_iterations_upper_bound = loop->nb_iterations_upper_bound;
1012 target->any_likely_upper_bound = loop->any_likely_upper_bound;
1013 target->nb_iterations_likely_upper_bound
1014 = loop->nb_iterations_likely_upper_bound;
1015 target->any_estimate = loop->any_estimate;
1016 target->nb_iterations_estimate = loop->nb_iterations_estimate;
1017 target->estimate_state = loop->estimate_state;
1018 target->constraints = loop->constraints;
1019 target->warned_aggressive_loop_optimizations
1020 |= loop->warned_aggressive_loop_optimizations;
1021 target->in_oacc_kernels_region = loop->in_oacc_kernels_region;
1022 target->owned_clique = loop->owned_clique;
1023 }
1024
1025 /* Copies copy of LOOP as subloop of TARGET loop, placing newly
1026 created loop into loops structure. If AFTER is non-null
1027 the new loop is added at AFTER->next, otherwise in front of TARGETs
1028 sibling list. */
1029 struct loop *
duplicate_loop(struct loop * loop,struct loop * target,struct loop * after)1030 duplicate_loop (struct loop *loop, struct loop *target, struct loop *after)
1031 {
1032 struct loop *cloop;
1033 cloop = alloc_loop ();
1034 place_new_loop (cfun, cloop);
1035
1036 copy_loop_info (loop, cloop);
1037
1038 /* Mark the new loop as copy of LOOP. */
1039 set_loop_copy (loop, cloop);
1040
1041 /* Add it to target. */
1042 flow_loop_tree_node_add (target, cloop, after);
1043
1044 return cloop;
1045 }
1046
1047 /* Copies structure of subloops of LOOP into TARGET loop, placing
1048 newly created loops into loop tree at the end of TARGETs sibling
1049 list in the original order. */
1050 void
duplicate_subloops(struct loop * loop,struct loop * target)1051 duplicate_subloops (struct loop *loop, struct loop *target)
1052 {
1053 struct loop *aloop, *cloop, *tail;
1054
1055 for (tail = target->inner; tail && tail->next; tail = tail->next)
1056 ;
1057 for (aloop = loop->inner; aloop; aloop = aloop->next)
1058 {
1059 cloop = duplicate_loop (aloop, target, tail);
1060 tail = cloop;
1061 gcc_assert(!tail->next);
1062 duplicate_subloops (aloop, cloop);
1063 }
1064 }
1065
1066 /* Copies structure of subloops of N loops, stored in array COPIED_LOOPS,
1067 into TARGET loop, placing newly created loops into loop tree adding
1068 them to TARGETs sibling list at the end in order. */
1069 static void
copy_loops_to(struct loop ** copied_loops,int n,struct loop * target)1070 copy_loops_to (struct loop **copied_loops, int n, struct loop *target)
1071 {
1072 struct loop *aloop, *tail;
1073 int i;
1074
1075 for (tail = target->inner; tail && tail->next; tail = tail->next)
1076 ;
1077 for (i = 0; i < n; i++)
1078 {
1079 aloop = duplicate_loop (copied_loops[i], target, tail);
1080 tail = aloop;
1081 gcc_assert(!tail->next);
1082 duplicate_subloops (copied_loops[i], aloop);
1083 }
1084 }
1085
1086 /* Redirects edge E to basic block DEST. */
1087 static void
loop_redirect_edge(edge e,basic_block dest)1088 loop_redirect_edge (edge e, basic_block dest)
1089 {
1090 if (e->dest == dest)
1091 return;
1092
1093 redirect_edge_and_branch_force (e, dest);
1094 }
1095
1096 /* Check whether LOOP's body can be duplicated. */
1097 bool
can_duplicate_loop_p(const struct loop * loop)1098 can_duplicate_loop_p (const struct loop *loop)
1099 {
1100 int ret;
1101 basic_block *bbs = get_loop_body (loop);
1102
1103 ret = can_copy_bbs_p (bbs, loop->num_nodes);
1104 free (bbs);
1105
1106 return ret;
1107 }
1108
/* Duplicates body of LOOP to given edge E NDUPL times.  Takes care of updating
   loop structure and dominators (order of inner subloops is retained).
   E's destination must be LOOP header for this to work, i.e. it must be entry
   or latch edge of this loop; these are unique, as the loops must have
   preheaders for this function to work correctly (in case E is latch, the
   function unrolls the loop, if E is entry edge, it peels the loop).  Store
   edges created by copying ORIG edge from copies corresponding to set bits in
   WONT_EXIT bitmap (bit 0 corresponds to original LOOP body, the other copies
   are numbered in order given by control flow through them) into TO_REMOVE
   array.  Returns false if duplication is impossible.  */

bool
duplicate_loop_to_header_edge (struct loop *loop, edge e,
			       unsigned int ndupl, sbitmap wont_exit,
			       edge orig, vec<edge> *to_remove,
			       int flags)
{
  struct loop *target, *aloop;
  struct loop **orig_loops;
  unsigned n_orig_loops;
  basic_block header = loop->header, latch = loop->latch;
  basic_block *new_bbs, *bbs, *first_active;
  basic_block new_bb, bb, first_active_latch = NULL;
  edge ae, latch_edge;
  /* The two edges that need special redirection after each copy: the latch
     edge and (optionally) the ORIG exit edge.  */
  edge spec_edges[2], new_spec_edges[2];
  const int SE_LATCH = 0;
  const int SE_ORIG = 1;
  unsigned i, j, n;
  int is_latch = (latch == e->src);
  /* Per-copy probability scale (SCALE_STEP), scale for the original body
     (SCALE_MAIN) and for the copy currently being produced (SCALE_ACT).  */
  profile_probability *scale_step = NULL;
  profile_probability scale_main = profile_probability::always ();
  profile_probability scale_act = profile_probability::always ();
  profile_count after_exit_num = profile_count::zero (),
		after_exit_den = profile_count::zero ();
  bool scale_after_exit = false;
  int add_irreducible_flag;
  basic_block place_after;
  bitmap bbs_to_scale = NULL;
  bitmap_iterator bi;

  gcc_assert (e->dest == loop->header);
  gcc_assert (ndupl > 0);

  if (orig)
    {
      /* Orig must be edge out of the loop.  */
      gcc_assert (flow_bb_inside_loop_p (loop, orig->src));
      gcc_assert (!flow_bb_inside_loop_p (loop, orig->dest));
    }

  n = loop->num_nodes;
  bbs = get_loop_body_in_dom_order (loop);
  gcc_assert (bbs[0] == loop->header);
  gcc_assert (bbs[n - 1] == loop->latch);

  /* Check whether duplication is possible.  */
  if (!can_copy_bbs_p (bbs, loop->num_nodes))
    {
      free (bbs);
      return false;
    }
  new_bbs = XNEWVEC (basic_block, loop->num_nodes);

  /* In case we are doing loop peeling and the loop is in the middle of
     irreducible region, the peeled copies will be inside it too.  */
  add_irreducible_flag = e->flags & EDGE_IRREDUCIBLE_LOOP;
  gcc_assert (!is_latch || !add_irreducible_flag);

  /* Find edge from latch.  */
  latch_edge = loop_latch_edge (loop);

  if (flags & DLTHE_FLAG_UPDATE_FREQ)
    {
      /* Calculate coefficients by that we have to scale counts
	 of duplicated loop bodies.  */
      profile_count count_in = header->count;
      profile_count count_le = latch_edge->count ();
      profile_count count_out_orig = orig ? orig->count () : count_in - count_le;
      profile_probability prob_pass_thru = count_le.probability_in (count_in);
      profile_probability prob_pass_wont_exit =
	      (count_le + count_out_orig).probability_in (count_in);

      if (orig && orig->probability.initialized_p ()
	  && !(orig->probability == profile_probability::always ()))
	{
	  /* The blocks that are dominated by a removed exit edge ORIG have
	     frequencies scaled by this.  */
	  if (orig->count ().initialized_p ())
	    {
	      after_exit_num = orig->src->count;
	      after_exit_den = after_exit_num - orig->count ();
	      scale_after_exit = true;
	    }
	  bbs_to_scale = BITMAP_ALLOC (NULL);
	  for (i = 0; i < n; i++)
	    {
	      if (bbs[i] != orig->src
		  && dominated_by_p (CDI_DOMINATORS, bbs[i], orig->src))
		bitmap_set_bit (bbs_to_scale, i);
	    }
	}

      scale_step = XNEWVEC (profile_probability, ndupl);

      /* Copy I keeps running with probability prob_pass_wont_exit when its
	 ORIG exit will be removed, prob_pass_thru otherwise.  */
      for (i = 1; i <= ndupl; i++)
	scale_step[i - 1] = bitmap_bit_p (wont_exit, i)
				? prob_pass_wont_exit
				: prob_pass_thru;

      /* Complete peeling is special as the probability of exit in last
	 copy becomes 1.  */
      if (flags & DLTHE_FLAG_COMPLETTE_PEEL)
	{
	  profile_count wanted_count = e->count ();

	  gcc_assert (!is_latch);
	  /* First copy has count of incoming edge.  Each subsequent
	     count should be reduced by prob_pass_wont_exit.  Caller
	     should've managed the flags so all except for original loop
	     has wont_exit set.  */
	  scale_act = wanted_count.probability_in (count_in);
	  /* Now simulate the duplication adjustments and compute header
	     frequency of the last copy.  */
	  for (i = 0; i < ndupl; i++)
	    wanted_count = wanted_count.apply_probability (scale_step [i]);
	  scale_main = wanted_count.probability_in (count_in);
	}
      /* Here we insert loop bodies inside the loop itself (for loop unrolling).
	 First iteration will be original loop followed by duplicated bodies.
	 It is necessary to scale down the original so we get right overall
	 number of iterations.  */
      else if (is_latch)
	{
	  profile_probability prob_pass_main = bitmap_bit_p (wont_exit, 0)
						? prob_pass_wont_exit
						: prob_pass_thru;
	  profile_probability p = prob_pass_main;
	  profile_count scale_main_den = count_in;
	  for (i = 0; i < ndupl; i++)
	    {
	      scale_main_den += count_in.apply_probability (p);
	      p = p * scale_step[i];
	    }
	  /* If original loop is executed COUNT_IN times, the unrolled
	     loop will account SCALE_MAIN_DEN times.  */
	  scale_main = count_in.probability_in (scale_main_den);
	  scale_act = scale_main * prob_pass_main;
	}
      else
	{
	  /* Peeling before the loop: the first copy receives the entry
	     count; the original body keeps whatever remains after all
	     peeled iterations.  */
	  profile_count preheader_count = e->count ();
	  for (i = 0; i < ndupl; i++)
	    scale_main = scale_main * scale_step[i];
	  scale_act = preheader_count.probability_in (count_in);
	}
    }

  /* Loop the new bbs will belong to.  */
  target = e->src->loop_father;

  /* Original loops.  */
  n_orig_loops = 0;
  for (aloop = loop->inner; aloop; aloop = aloop->next)
    n_orig_loops++;
  orig_loops = XNEWVEC (struct loop *, n_orig_loops);
  for (aloop = loop->inner, i = 0; aloop; aloop = aloop->next, i++)
    orig_loops[i] = aloop;

  set_loop_copy (loop, target);

  /* FIRST_ACTIVE[I] records the first copy of BBS[I] in control-flow
     order; used below when fixing dominators of outer blocks.  */
  first_active = XNEWVEC (basic_block, n);
  if (is_latch)
    {
      memcpy (first_active, bbs, n * sizeof (basic_block));
      first_active_latch = latch;
    }

  spec_edges[SE_ORIG] = orig;
  spec_edges[SE_LATCH] = latch_edge;

  place_after = e->src;
  for (j = 0; j < ndupl; j++)
    {
      /* Copy loops.  */
      copy_loops_to (orig_loops, n_orig_loops, target);

      /* Copy bbs.  */
      copy_bbs (bbs, n, new_bbs, spec_edges, 2, new_spec_edges, loop,
		place_after, true);
      place_after = new_spec_edges[SE_LATCH]->src;

      /* Tag each new block with its 1-based copy number in the aux field
	 when the caller asked for it.  */
      if (flags & DLTHE_RECORD_COPY_NUMBER)
	for (i = 0; i < n; i++)
	  {
	    gcc_assert (!new_bbs[i]->aux);
	    new_bbs[i]->aux = (void *)(size_t)(j + 1);
	  }

      /* Note whether the blocks and edges belong to an irreducible loop.  */
      if (add_irreducible_flag)
	{
	  /* BB_DUPLICATED temporarily marks the fresh blocks so edges
	     inside the copy can be recognized below.  */
	  for (i = 0; i < n; i++)
	    new_bbs[i]->flags |= BB_DUPLICATED;
	  for (i = 0; i < n; i++)
	    {
	      edge_iterator ei;
	      new_bb = new_bbs[i];
	      if (new_bb->loop_father == target)
		new_bb->flags |= BB_IRREDUCIBLE_LOOP;

	      FOR_EACH_EDGE (ae, ei, new_bb->succs)
		if ((ae->dest->flags & BB_DUPLICATED)
		    && (ae->src->loop_father == target
			|| ae->dest->loop_father == target))
		  ae->flags |= EDGE_IRREDUCIBLE_LOOP;
	    }
	  for (i = 0; i < n; i++)
	    new_bbs[i]->flags &= ~BB_DUPLICATED;
	}

      /* Redirect the special edges.  */
      if (is_latch)
	{
	  /* Unrolling: splice the copy between the current latch and the
	     header; the copy's latch becomes the loop latch.  */
	  redirect_edge_and_branch_force (latch_edge, new_bbs[0]);
	  redirect_edge_and_branch_force (new_spec_edges[SE_LATCH],
					  loop->header);
	  set_immediate_dominator (CDI_DOMINATORS, new_bbs[0], latch);
	  latch = loop->latch = new_bbs[n - 1];
	  e = latch_edge = new_spec_edges[SE_LATCH];
	}
      else
	{
	  /* Peeling: insert the copy on the entry edge, before the loop.  */
	  redirect_edge_and_branch_force (new_spec_edges[SE_LATCH],
					  loop->header);
	  redirect_edge_and_branch_force (e, new_bbs[0]);
	  set_immediate_dominator (CDI_DOMINATORS, new_bbs[0], e->src);
	  e = new_spec_edges[SE_LATCH];
	}

      /* Record exit edge in this copy.  */
      if (orig && bitmap_bit_p (wont_exit, j + 1))
	{
	  if (to_remove)
	    to_remove->safe_push (new_spec_edges[SE_ORIG]);
	  force_edge_cold (new_spec_edges[SE_ORIG], true);

	  /* Scale the frequencies of the blocks dominated by the exit.  */
	  if (bbs_to_scale && scale_after_exit)
	    {
	      EXECUTE_IF_SET_IN_BITMAP (bbs_to_scale, 0, i, bi)
		scale_bbs_frequencies_profile_count (new_bbs + i, 1, after_exit_num,
						     after_exit_den);
	    }
	}

      /* Record the first copy in the control flow order if it is not
	 the original loop (i.e. in case of peeling).  */
      if (!first_active_latch)
	{
	  memcpy (first_active, new_bbs, n * sizeof (basic_block));
	  first_active_latch = new_bbs[n - 1];
	}

      /* Set counts and frequencies.  */
      if (flags & DLTHE_FLAG_UPDATE_FREQ)
	{
	  scale_bbs_frequencies (new_bbs, n, scale_act);
	  scale_act = scale_act * scale_step[j];
	}
    }
  free (new_bbs);
  free (orig_loops);

  /* Record the exit edge in the original loop body, and update the frequencies.  */
  if (orig && bitmap_bit_p (wont_exit, 0))
    {
      if (to_remove)
	to_remove->safe_push (orig);
      force_edge_cold (orig, true);

      /* Scale the frequencies of the blocks dominated by the exit.  */
      if (bbs_to_scale && scale_after_exit)
	{
	  EXECUTE_IF_SET_IN_BITMAP (bbs_to_scale, 0, i, bi)
	    scale_bbs_frequencies_profile_count (bbs + i, 1, after_exit_num,
						 after_exit_den);
	}
    }

  /* Update the original loop.  */
  if (!is_latch)
    set_immediate_dominator (CDI_DOMINATORS, e->dest, e->src);
  if (flags & DLTHE_FLAG_UPDATE_FREQ)
    {
      scale_bbs_frequencies (bbs, n, scale_main);
      free (scale_step);
    }

  /* Update dominators of outer blocks if affected.  */
  for (i = 0; i < n; i++)
    {
      basic_block dominated, dom_bb;
      vec<basic_block> dom_bbs;
      unsigned j;

      bb = bbs[i];
      /* Clear any copy-number tags left in aux above.  */
      bb->aux = 0;

      dom_bbs = get_dominated_by (CDI_DOMINATORS, bb);
      FOR_EACH_VEC_ELT (dom_bbs, j, dominated)
	{
	  if (flow_bb_inside_loop_p (loop, dominated))
	    continue;
	  /* A block outside the loop that was dominated by BB is now
	     dominated by the common dominator of BB's first copy and the
	     first active latch.  */
	  dom_bb = nearest_common_dominator (
			CDI_DOMINATORS, first_active[i], first_active_latch);
	  set_immediate_dominator (CDI_DOMINATORS, dominated, dom_bb);
	}
      dom_bbs.release ();
    }
  free (first_active);

  free (bbs);
  BITMAP_FREE (bbs_to_scale);

  return true;
}
1436
1437 /* A callback for make_forwarder block, to redirect all edges except for
1438 MFB_KJ_EDGE to the entry part. E is the edge for that we should decide
1439 whether to redirect it. */
1440
1441 edge mfb_kj_edge;
1442 bool
mfb_keep_just(edge e)1443 mfb_keep_just (edge e)
1444 {
1445 return e != mfb_kj_edge;
1446 }
1447
1448 /* True when a candidate preheader BLOCK has predecessors from LOOP. */
1449
1450 static bool
has_preds_from_loop(basic_block block,struct loop * loop)1451 has_preds_from_loop (basic_block block, struct loop *loop)
1452 {
1453 edge e;
1454 edge_iterator ei;
1455
1456 FOR_EACH_EDGE (e, ei, block->preds)
1457 if (e->src->loop_father == loop)
1458 return true;
1459 return false;
1460 }
1461
/* Creates a pre-header for a LOOP.  Returns newly created block.  Unless
   CP_SIMPLE_PREHEADERS is set in FLAGS, we only force LOOP to have single
   entry; otherwise we also force preheader block to have only one successor.
   When CP_FALLTHRU_PREHEADERS is set in FLAGS, we force the preheader block
   to be a fallthru predecessor to the loop header and to have only
   predecessors from outside of the loop.
   The function also updates dominators.  */

basic_block
create_preheader (struct loop *loop, int flags)
{
  edge e;
  basic_block dummy;
  int nentry = 0;
  bool irred = false;
  bool latch_edge_was_fallthru;
  edge one_succ_pred = NULL, single_entry = NULL;
  edge_iterator ei;

  /* Count entry edges (header predecessors other than the latch), remember
     the last one seen, whether any is irreducible, and an entry whose
     source has a single successor (a good place for the preheader).  */
  FOR_EACH_EDGE (e, ei, loop->header->preds)
    {
      if (e->src == loop->latch)
	continue;
      irred |= (e->flags & EDGE_IRREDUCIBLE_LOOP) != 0;
      nentry++;
      single_entry = e;
      if (single_succ_p (e->src))
	one_succ_pred = e;
    }
  gcc_assert (nentry);
  if (nentry == 1)
    {
      /* Single entry: decide whether the existing predecessor can serve as
	 the preheader or a forwarder block must be inserted.  */
      bool need_forwarder_block = false;

      /* We do not allow entry block to be the loop preheader, since we
	 cannot emit code there.  */
      if (single_entry->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	need_forwarder_block = true;
      else
	{
	  /* If we want simple preheaders, also force the preheader to have
	     just a single successor.  */
	  if ((flags & CP_SIMPLE_PREHEADERS)
	      && !single_succ_p (single_entry->src))
	    need_forwarder_block = true;
	  /* If we want fallthru preheaders, also create forwarder block when
	     preheader ends with a jump or has predecessors from loop.  */
	  else if ((flags & CP_FALLTHRU_PREHEADERS)
		   && (JUMP_P (BB_END (single_entry->src))
		       || has_preds_from_loop (single_entry->src, loop)))
	    need_forwarder_block = true;
	}
      if (! need_forwarder_block)
	return NULL;
    }

  mfb_kj_edge = loop_latch_edge (loop);
  latch_edge_was_fallthru = (mfb_kj_edge->flags & EDGE_FALLTHRU) != 0;
  /* With a single non-crossing entry edge, simply split it; otherwise split
     the header itself, redirecting everything but the latch edge (kept by
     mfb_keep_just) to the new entry part.  */
  if (nentry == 1
      && ((flags & CP_FALLTHRU_PREHEADERS) == 0
	  || (single_entry->flags & EDGE_CROSSING) == 0))
    dummy = split_edge (single_entry);
  else
    {
      edge fallthru = make_forwarder_block (loop->header, mfb_keep_just, NULL);
      dummy = fallthru->src;
      loop->header = fallthru->dest;
    }

  /* Try to be clever in placing the newly created preheader.  The idea is to
     avoid breaking any "fallthruness" relationship between blocks.

     The preheader was created just before the header and all incoming edges
     to the header were redirected to the preheader, except the latch edge.
     So the only problematic case is when this latch edge was a fallthru
     edge: it is not anymore after the preheader creation so we have broken
     the fallthruness.  We're therefore going to look for a better place.  */
  if (latch_edge_was_fallthru)
    {
      if (one_succ_pred)
	e = one_succ_pred;
      else
	e = EDGE_PRED (dummy, 0);

      move_block_after (dummy, e->src);
    }

  /* Propagate the irreducibility marking to the new block and edge.  */
  if (irred)
    {
      dummy->flags |= BB_IRREDUCIBLE_LOOP;
      single_succ_edge (dummy)->flags |= EDGE_IRREDUCIBLE_LOOP;
    }

  if (dump_file)
    fprintf (dump_file, "Created preheader block for loop %i\n",
	     loop->num);

  if (flags & CP_FALLTHRU_PREHEADERS)
    gcc_assert ((single_succ_edge (dummy)->flags & EDGE_FALLTHRU)
		&& !JUMP_P (BB_END (dummy)));

  return dummy;
}
1565
1566 /* Create preheaders for each loop; for meaning of FLAGS see create_preheader. */
1567
1568 void
create_preheaders(int flags)1569 create_preheaders (int flags)
1570 {
1571 struct loop *loop;
1572
1573 if (!current_loops)
1574 return;
1575
1576 FOR_EACH_LOOP (loop, 0)
1577 create_preheader (loop, flags);
1578 loops_state_set (LOOPS_HAVE_PREHEADERS);
1579 }
1580
1581 /* Forces all loop latches to have only single successor. */
1582
1583 void
force_single_succ_latches(void)1584 force_single_succ_latches (void)
1585 {
1586 struct loop *loop;
1587 edge e;
1588
1589 FOR_EACH_LOOP (loop, 0)
1590 {
1591 if (loop->latch != loop->header && single_succ_p (loop->latch))
1592 continue;
1593
1594 e = find_edge (loop->latch, loop->header);
1595 gcc_checking_assert (e != NULL);
1596
1597 split_edge (e);
1598 }
1599 loops_state_set (LOOPS_HAVE_SIMPLE_LATCHES);
1600 }
1601
/* This function is called from loop_version.  It splits the entry edge
   of the loop we want to version, adds the versioning condition, and
   adjust the edges to the two versions of the loop appropriately.
   e is an incoming edge.  Returns the basic block containing the
   condition.

   --- edge e ---- > [second_head]

   Split it and insert new conditional expression and adjust edges.

    --- edge e ---> [cond expr] ---> [first_head]
			|
			+---------> [second_head]

   THEN_PROB is the probability of then branch of the condition.
   ELSE_PROB is the probability of else branch.  Note that they may be both
   REG_BR_PROB_BASE when condition is IFN_LOOP_VECTORIZED or
   IFN_LOOP_DIST_ALIAS.  */

static basic_block
lv_adjust_loop_entry_edge (basic_block first_head, basic_block second_head,
			   edge e, void *cond_expr,
			   profile_probability then_prob,
			   profile_probability else_prob)
{
  basic_block new_head = NULL;
  edge e1;

  gcc_assert (e->dest == second_head);

  /* Split edge 'e'.  This will create a new basic block, where we can
     insert conditional expr.  */
  new_head = split_edge (e);

  /* Emit COND_EXPR into the new block; the IR-specific hook takes care of
     GIMPLE vs RTL differences.  */
  lv_add_condition_to_bb (first_head, second_head, new_head,
			  cond_expr);

  /* Don't set EDGE_TRUE_VALUE in RTL mode, as it's invalid there.  */
  e = single_succ_edge (new_head);
  e1 = make_edge (new_head, first_head,
		  current_ir_type () == IR_GIMPLE ? EDGE_TRUE_VALUE : 0);
  e1->probability = then_prob;
  e->probability = else_prob;

  /* Both loop heads are now reached only through the condition block.  */
  set_immediate_dominator (CDI_DOMINATORS, first_head, new_head);
  set_immediate_dominator (CDI_DOMINATORS, second_head, new_head);

  /* Adjust loop header phi nodes.  */
  lv_adjust_loop_header_phi (first_head, second_head, new_head, e1);

  return new_head;
}
1654
/* Main entry point for Loop Versioning transformation.

   This transformation given a condition and a loop, creates
   -if (condition) { loop_copy1 } else { loop_copy2 },
   where loop_copy1 is the loop transformed in one way, and loop_copy2
   is the loop transformed in another way (or unchanged).  COND_EXPR
   may be a run time test for things that were not resolved by static
   analysis (overlapping ranges (anti-aliasing), alignment, etc.).

   If non-NULL, CONDITION_BB is set to the basic block containing the
   condition.

   THEN_PROB is the probability of the then edge of the if.  THEN_SCALE
   is the ratio by that the frequencies in the original loop should
   be scaled.  ELSE_SCALE is the ratio by that the frequencies in the
   new loop should be scaled.

   If PLACE_AFTER is true, we place the new loop after LOOP in the
   instruction stream, otherwise it is placed before LOOP.  */

struct loop *
loop_version (struct loop *loop,
	      void *cond_expr, basic_block *condition_bb,
	      profile_probability then_prob, profile_probability else_prob,
	      profile_probability then_scale, profile_probability else_scale,
	      bool place_after)
{
  basic_block first_head, second_head;
  edge entry, latch_edge, true_edge, false_edge;
  int irred_flag;
  struct loop *nloop;
  basic_block cond_bb;

  /* Record entry and latch edges for the loop */
  entry = loop_preheader_edge (loop);
  /* Temporarily clear the irreducible flag on the entry edge; it is
     restored (and propagated to the new edges) at the end.  */
  irred_flag = entry->flags & EDGE_IRREDUCIBLE_LOOP;
  entry->flags &= ~EDGE_IRREDUCIBLE_LOOP;

  /* Note down head of loop as first_head.  */
  first_head = entry->dest;

  /* Duplicate loop.  */
  if (!cfg_hook_duplicate_loop_to_header_edge (loop, entry, 1,
					       NULL, NULL, NULL, 0))
    {
      /* Duplication failed; restore the flag and bail out.  */
      entry->flags |= irred_flag;
      return NULL;
    }

  /* After duplication entry edge now points to new loop head block.
     Note down new head as second_head.  */
  second_head = entry->dest;

  /* Split loop entry edge and insert new block with cond expr.  */
  cond_bb = lv_adjust_loop_entry_edge (first_head, second_head,
				       entry, cond_expr, then_prob, else_prob);
  if (condition_bb)
    *condition_bb = cond_bb;

  if (!cond_bb)
    {
      entry->flags |= irred_flag;
      return NULL;
    }

  latch_edge = single_succ_edge (get_bb_copy (loop->latch));

  /* Turn the duplicated body into a proper loop in the loop tree.  */
  extract_cond_bb_edges (cond_bb, &true_edge, &false_edge);
  nloop = loopify (latch_edge,
		   single_pred_edge (get_bb_copy (loop->header)),
		   cond_bb, true_edge, false_edge,
		   false /* Do not redirect all edges.  */,
		   then_scale, else_scale);

  copy_loop_info (loop, nloop);

  /* loopify redirected latch_edge.  Update its PENDING_STMTS.  */
  lv_flush_pending_stmts (latch_edge);

  /* loopify redirected condition_bb's succ edge.  Update its PENDING_STMTS.  */
  extract_cond_bb_edges (cond_bb, &true_edge, &false_edge);
  lv_flush_pending_stmts (false_edge);
  /* Adjust irreducible flag.  */
  if (irred_flag)
    {
      cond_bb->flags |= BB_IRREDUCIBLE_LOOP;
      loop_preheader_edge (loop)->flags |= EDGE_IRREDUCIBLE_LOOP;
      loop_preheader_edge (nloop)->flags |= EDGE_IRREDUCIBLE_LOOP;
      single_pred_edge (cond_bb)->flags |= EDGE_IRREDUCIBLE_LOOP;
    }

  if (place_after)
    {
      /* Move the new loop's blocks, in dominance order, to just after the
	 original loop's latch in the instruction stream.  */
      basic_block *bbs = get_loop_body_in_dom_order (nloop), after;
      unsigned i;

      after = loop->latch;

      for (i = 0; i < nloop->num_nodes; i++)
	{
	  move_block_after (bbs[i], after);
	  after = bbs[i];
	}
      free (bbs);
    }

  /* At this point condition_bb is loop preheader with two successors,
     first_head and second_head.   Make sure that loop preheader has only
     one successor.  */
  split_edge (loop_preheader_edge (loop));
  split_edge (loop_preheader_edge (nloop));

  return nloop;
}
1769