/* Control flow graph manipulation code for GNU compiler.
   Copyright (C) 1987-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
19
/* This file contains low level functions to manipulate the CFG and analyze it
   that are aware of the RTL intermediate language.

   Available functionality:
     - Basic CFG/RTL manipulation API documented in cfghooks.h
     - CFG-aware instruction chain manipulation
	 delete_insn, delete_insn_chain
     - Edge splitting and committing to edges
	 insert_insn_on_edge, commit_edge_insertions
     - CFG updating after insn simplification
	 purge_dead_edges, purge_all_dead_edges
     - CFG fixing after coarse manipulation
	 fixup_abnormal_edges

   Functions not supposed for generic use:
     - Infrastructure to determine quickly basic block for insn
	 compute_bb_for_insn, update_bb_for_insn, set_block_for_insn,
     - Edge redirection with updating and optimizing of insn chain
	 block_label, tidy_fallthru_edge, force_nonfallthru

   A short usage sketch of the edge insertion API follows this comment.  */
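
/* A minimal sketch of the edge insertion API listed above (illustrative
   only; E, PAYLOAD_REG and PAYLOAD_SRC are hypothetical names that are not
   defined in this file):

       start_sequence ();
       emit_move_insn (PAYLOAD_REG, PAYLOAD_SRC);
       rtx_insn *seq = get_insns ();
       end_sequence ();
       insert_insn_on_edge (seq, E);

   Once every interesting edge has been annotated this way, a single call
   to commit_edge_insertions () splits edges as needed and emits the
   queued sequences.  */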
39
40 #include "config.h"
41 #include "system.h"
42 #include "coretypes.h"
43 #include "backend.h"
44 #include "target.h"
45 #include "rtl.h"
46 #include "tree.h"
47 #include "cfghooks.h"
48 #include "df.h"
49 #include "insn-config.h"
50 #include "memmodel.h"
51 #include "emit-rtl.h"
52 #include "cfgrtl.h"
53 #include "cfganal.h"
54 #include "cfgbuild.h"
55 #include "cfgcleanup.h"
56 #include "bb-reorder.h"
57 #include "rtl-error.h"
58 #include "insn-attr.h"
59 #include "dojump.h"
60 #include "expr.h"
61 #include "cfgloop.h"
62 #include "tree-pass.h"
63 #include "print-rtl.h"
64
65 /* Holds the interesting leading and trailing notes for the function.
66 Only applicable if the CFG is in cfglayout mode. */
67 static GTY(()) rtx_insn *cfg_layout_function_footer;
68 static GTY(()) rtx_insn *cfg_layout_function_header;
69
70 static rtx_insn *skip_insns_after_block (basic_block);
71 static void record_effective_endpoints (void);
72 static void fixup_reorder_chain (void);
73
74 void verify_insn_chain (void);
75 static void fixup_fallthru_exit_predecessor (void);
76 static int can_delete_note_p (const rtx_note *);
77 static int can_delete_label_p (const rtx_code_label *);
78 static basic_block rtl_split_edge (edge);
79 static bool rtl_move_block_after (basic_block, basic_block);
80 static int rtl_verify_flow_info (void);
81 static basic_block cfg_layout_split_block (basic_block, void *);
82 static edge cfg_layout_redirect_edge_and_branch (edge, basic_block);
83 static basic_block cfg_layout_redirect_edge_and_branch_force (edge, basic_block);
84 static void cfg_layout_delete_block (basic_block);
85 static void rtl_delete_block (basic_block);
86 static basic_block rtl_redirect_edge_and_branch_force (edge, basic_block);
87 static edge rtl_redirect_edge_and_branch (edge, basic_block);
88 static basic_block rtl_split_block (basic_block, void *);
89 static void rtl_dump_bb (FILE *, basic_block, int, dump_flags_t);
90 static int rtl_verify_flow_info_1 (void);
91 static void rtl_make_forwarder_block (edge);
92
93 /* Return true if NOTE is not one of the ones that must be kept paired,
94 so that we may simply delete it. */
95
static int
can_delete_note_p (const rtx_note *note)
98 {
99 switch (NOTE_KIND (note))
100 {
101 case NOTE_INSN_DELETED:
102 case NOTE_INSN_BASIC_BLOCK:
103 case NOTE_INSN_EPILOGUE_BEG:
104 return true;
105
106 default:
107 return false;
108 }
109 }
110
111 /* True if a given label can be deleted. */
112
static int
can_delete_label_p (const rtx_code_label *label)
115 {
116 return (!LABEL_PRESERVE_P (label)
117 /* User declared labels must be preserved. */
118 && LABEL_NAME (label) == 0
119 && !vec_safe_contains<rtx_insn *> (forced_labels,
120 const_cast<rtx_code_label *> (label)));
121 }
122
/* Delete INSN by patching it out.  */

void
delete_insn (rtx_insn *insn)
127 {
128 rtx note;
129 bool really_delete = true;
130
131 if (LABEL_P (insn))
132 {
      /* Some labels can't be directly removed from the INSN chain, as they
	 might be referenced via variables, the constant pool, etc.
	 Convert them to the special NOTE_INSN_DELETED_LABEL note.  */
136 if (! can_delete_label_p (as_a <rtx_code_label *> (insn)))
137 {
138 const char *name = LABEL_NAME (insn);
139 basic_block bb = BLOCK_FOR_INSN (insn);
140 rtx_insn *bb_note = NEXT_INSN (insn);
141
142 really_delete = false;
143 PUT_CODE (insn, NOTE);
144 NOTE_KIND (insn) = NOTE_INSN_DELETED_LABEL;
145 NOTE_DELETED_LABEL_NAME (insn) = name;
146
147 /* If the note following the label starts a basic block, and the
148 label is a member of the same basic block, interchange the two. */
149 if (bb_note != NULL_RTX
150 && NOTE_INSN_BASIC_BLOCK_P (bb_note)
151 && bb != NULL
152 && bb == BLOCK_FOR_INSN (bb_note))
153 {
154 reorder_insns_nobb (insn, insn, bb_note);
155 BB_HEAD (bb) = bb_note;
156 if (BB_END (bb) == bb_note)
157 BB_END (bb) = insn;
158 }
159 }
160
161 remove_node_from_insn_list (insn, &nonlocal_goto_handler_labels);
162 }
163
164 if (really_delete)
165 {
166 /* If this insn has already been deleted, something is very wrong. */
167 gcc_assert (!insn->deleted ());
168 if (INSN_P (insn))
169 df_insn_delete (insn);
170 remove_insn (insn);
171 insn->set_deleted ();
172 }
173
174 /* If deleting a jump, decrement the use count of the label. Deleting
175 the label itself should happen in the normal course of block merging. */
176 if (JUMP_P (insn))
177 {
178 if (JUMP_LABEL (insn)
179 && LABEL_P (JUMP_LABEL (insn)))
180 LABEL_NUSES (JUMP_LABEL (insn))--;
181
182 /* If there are more targets, remove them too. */
183 while ((note
184 = find_reg_note (insn, REG_LABEL_TARGET, NULL_RTX)) != NULL_RTX
185 && LABEL_P (XEXP (note, 0)))
186 {
187 LABEL_NUSES (XEXP (note, 0))--;
188 remove_note (insn, note);
189 }
190 }
191
192 /* Also if deleting any insn that references a label as an operand. */
193 while ((note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX)) != NULL_RTX
194 && LABEL_P (XEXP (note, 0)))
195 {
196 LABEL_NUSES (XEXP (note, 0))--;
197 remove_note (insn, note);
198 }
199
200 if (rtx_jump_table_data *table = dyn_cast <rtx_jump_table_data *> (insn))
201 {
202 rtvec vec = table->get_labels ();
203 int len = GET_NUM_ELEM (vec);
204 int i;
205
206 for (i = 0; i < len; i++)
207 {
208 rtx label = XEXP (RTVEC_ELT (vec, i), 0);
209
210 /* When deleting code in bulk (e.g. removing many unreachable
211 blocks) we can delete a label that's a target of the vector
212 before deleting the vector itself. */
213 if (!NOTE_P (label))
214 LABEL_NUSES (label)--;
215 }
216 }
217 }
218
219 /* Like delete_insn but also purge dead edges from BB.
220 Return true if any edges are eliminated. */
221
bool
delete_insn_and_edges (rtx_insn *insn)
224 {
225 bool purge = false;
226
227 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
228 {
229 basic_block bb = BLOCK_FOR_INSN (insn);
230 if (BB_END (bb) == insn)
231 purge = true;
232 else if (DEBUG_INSN_P (BB_END (bb)))
233 for (rtx_insn *dinsn = NEXT_INSN (insn);
234 DEBUG_INSN_P (dinsn); dinsn = NEXT_INSN (dinsn))
235 if (BB_END (bb) == dinsn)
236 {
237 purge = true;
238 break;
239 }
240 }
241 delete_insn (insn);
242 if (purge)
243 return purge_dead_edges (BLOCK_FOR_INSN (insn));
244 return false;
245 }
246
247 /* Unlink a chain of insns between START and FINISH, leaving notes
248 that must be paired. If CLEAR_BB is true, we set bb field for
249 insns that cannot be removed to NULL. */
250
void
delete_insn_chain (rtx start, rtx_insn *finish, bool clear_bb)
253 {
254 /* Unchain the insns one by one. It would be quicker to delete all of these
255 with a single unchaining, rather than one at a time, but we need to keep
256 the NOTE's. */
257 rtx_insn *current = finish;
258 while (1)
259 {
260 rtx_insn *prev = PREV_INSN (current);
261 if (NOTE_P (current) && !can_delete_note_p (as_a <rtx_note *> (current)))
262 ;
263 else
264 delete_insn (current);
265
266 if (clear_bb && !current->deleted ())
267 set_block_for_insn (current, NULL);
268
269 if (current == start)
270 break;
271 current = prev;
272 }
273 }
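
/* For example, a caller that wants to drop the whole body of a block while
   keeping paired notes and clearing the BB field of the survivors would do
   (sketch; rtl_delete_block below uses this pattern):

       delete_insn_chain (BB_HEAD (bb), get_last_bb_insn (bb), true);  */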
274
/* Create a new basic block consisting of the instructions between HEAD and
   END inclusive.  This function is designed to allow fast BB construction
   - it reuses the note and basic block struct in BB_NOTE, if any, does not
   grow the BASIC_BLOCK chain and should be used directly only by CFG
   construction code.  END can be NULL to create a new empty basic block
   before HEAD.  Both END and HEAD can be NULL to create a basic block at
   the end of the INSN chain.  AFTER is the basic block we should be put
   after.  */

basic_block
create_basic_block_structure (rtx_insn *head, rtx_insn *end, rtx_note *bb_note,
			      basic_block after)
286 {
287 basic_block bb;
288
289 if (bb_note
290 && (bb = NOTE_BASIC_BLOCK (bb_note)) != NULL
291 && bb->aux == NULL)
292 {
293 /* If we found an existing note, thread it back onto the chain. */
294
295 rtx_insn *after;
296
297 if (LABEL_P (head))
298 after = head;
299 else
300 {
301 after = PREV_INSN (head);
302 head = bb_note;
303 }
304
305 if (after != bb_note && NEXT_INSN (after) != bb_note)
306 reorder_insns_nobb (bb_note, bb_note, after);
307 }
308 else
309 {
310 /* Otherwise we must create a note and a basic block structure. */
311
312 bb = alloc_block ();
313
314 init_rtl_bb_info (bb);
315 if (!head && !end)
316 head = end = bb_note
317 = emit_note_after (NOTE_INSN_BASIC_BLOCK, get_last_insn ());
318 else if (LABEL_P (head) && end)
319 {
320 bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, head);
321 if (head == end)
322 end = bb_note;
323 }
324 else
325 {
326 bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, head);
327 head = bb_note;
328 if (!end)
329 end = head;
330 }
331
332 NOTE_BASIC_BLOCK (bb_note) = bb;
333 }
334
335 /* Always include the bb note in the block. */
336 if (NEXT_INSN (end) == bb_note)
337 end = bb_note;
338
339 BB_HEAD (bb) = head;
340 BB_END (bb) = end;
341 bb->index = last_basic_block_for_fn (cfun)++;
342 bb->flags = BB_NEW | BB_RTL;
343 link_block (bb, after);
344 SET_BASIC_BLOCK_FOR_FN (cfun, bb->index, bb);
345 df_bb_refs_record (bb->index, false);
346 update_bb_for_insn (bb);
347 BB_SET_PARTITION (bb, BB_UNPARTITIONED);
348
349 /* Tag the block so that we know it has been used when considering
350 other basic block notes. */
351 bb->aux = bb;
352
353 return bb;
354 }
355
356 /* Create new basic block consisting of instructions in between HEAD and END
357 and place it to the BB chain after block AFTER. END can be NULL to
358 create a new empty basic block before HEAD. Both END and HEAD can be
359 NULL to create basic block at the end of INSN chain. */
360
static basic_block
rtl_create_basic_block (void *headp, void *endp, basic_block after)
363 {
364 rtx_insn *head = (rtx_insn *) headp;
365 rtx_insn *end = (rtx_insn *) endp;
366 basic_block bb;
367
368 /* Grow the basic block array if needed. */
369 if ((size_t) last_basic_block_for_fn (cfun)
370 >= basic_block_info_for_fn (cfun)->length ())
371 {
372 size_t new_size =
373 (last_basic_block_for_fn (cfun)
374 + (last_basic_block_for_fn (cfun) + 3) / 4);
375 vec_safe_grow_cleared (basic_block_info_for_fn (cfun), new_size);
376 }
377
378 n_basic_blocks_for_fn (cfun)++;
379
380 bb = create_basic_block_structure (head, end, NULL, after);
381 bb->aux = NULL;
382 return bb;
383 }
384
static basic_block
cfg_layout_create_basic_block (void *head, void *end, basic_block after)
387 {
388 basic_block newbb = rtl_create_basic_block (head, end, after);
389
390 return newbb;
391 }
392
393 /* Delete the insns in a (non-live) block. We physically delete every
394 non-deleted-note insn, and update the flow graph appropriately.
395
396 Return nonzero if we deleted an exception handler. */
397
398 /* ??? Preserving all such notes strikes me as wrong. It would be nice
399 to post-process the stream to remove empty blocks, loops, ranges, etc. */
400
static void
rtl_delete_block (basic_block b)
403 {
404 rtx_insn *insn, *end;
405
406 /* If the head of this block is a CODE_LABEL, then it might be the
407 label for an exception handler which can't be reached. We need
408 to remove the label from the exception_handler_label list. */
409 insn = BB_HEAD (b);
410
411 end = get_last_bb_insn (b);
412
413 /* Selectively delete the entire chain. */
414 BB_HEAD (b) = NULL;
415 delete_insn_chain (insn, end, true);
416
417
418 if (dump_file)
419 fprintf (dump_file, "deleting block %d\n", b->index);
420 df_bb_delete (b->index);
421 }
422
423 /* Records the basic block struct in BLOCK_FOR_INSN for every insn. */
424
void
compute_bb_for_insn (void)
427 {
428 basic_block bb;
429
430 FOR_EACH_BB_FN (bb, cfun)
431 {
432 rtx_insn *end = BB_END (bb);
433 rtx_insn *insn;
434
435 for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
436 {
437 BLOCK_FOR_INSN (insn) = bb;
438 if (insn == end)
439 break;
440 }
441 }
442 }
443
444 /* Release the basic_block_for_insn array. */
445
unsigned int
free_bb_for_insn (void)
448 {
449 rtx_insn *insn;
450 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
451 if (!BARRIER_P (insn))
452 BLOCK_FOR_INSN (insn) = NULL;
453 return 0;
454 }
455
456 namespace {
457
458 const pass_data pass_data_free_cfg =
459 {
460 RTL_PASS, /* type */
461 "*free_cfg", /* name */
462 OPTGROUP_NONE, /* optinfo_flags */
463 TV_NONE, /* tv_id */
464 0, /* properties_required */
465 0, /* properties_provided */
466 PROP_cfg, /* properties_destroyed */
467 0, /* todo_flags_start */
468 0, /* todo_flags_finish */
469 };
470
471 class pass_free_cfg : public rtl_opt_pass
472 {
public:
  pass_free_cfg (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_free_cfg, ctxt)
  {}
477
478 /* opt_pass methods: */
479 virtual unsigned int execute (function *);
480
481 }; // class pass_free_cfg
482
unsigned int
pass_free_cfg::execute (function *)
485 {
486 /* The resource.c machinery uses DF but the CFG isn't guaranteed to be
487 valid at that point so it would be too late to call df_analyze. */
488 if (DELAY_SLOTS && optimize > 0 && flag_delayed_branch)
489 {
490 df_note_add_problem ();
491 df_analyze ();
492 }
493
494 if (crtl->has_bb_partition)
495 insert_section_boundary_note ();
496
497 free_bb_for_insn ();
498 return 0;
499 }
500
501 } // anon namespace
502
rtl_opt_pass *
make_pass_free_cfg (gcc::context *ctxt)
505 {
506 return new pass_free_cfg (ctxt);
507 }
508
/* Return the insn after which code should be emitted when we want to emit
   code at the entry of the function.  */
rtx_insn *
entry_of_function (void)
512 {
513 return (n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS ?
514 BB_HEAD (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb) : get_insns ());
515 }
516
517 /* Emit INSN at the entry point of the function, ensuring that it is only
518 executed once per function. */
void
emit_insn_at_entry (rtx insn)
521 {
522 edge_iterator ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
523 edge e = ei_safe_edge (ei);
524 gcc_assert (e->flags & EDGE_FALLTHRU);
525
526 insert_insn_on_edge (insn, e);
527 commit_edge_insertions ();
528 }
529
/* Update BLOCK_FOR_INSN of insns between BEGIN and END
   (or BARRIER if found) and notify df of the bb change.
   The insn chain range is inclusive
   (i.e. both BEGIN and END will be updated).  */

static void
update_bb_for_insn_chain (rtx_insn *begin, rtx_insn *end, basic_block bb)
537 {
538 rtx_insn *insn;
539
540 end = NEXT_INSN (end);
541 for (insn = begin; insn != end; insn = NEXT_INSN (insn))
542 if (!BARRIER_P (insn))
543 df_insn_change_bb (insn, bb);
544 }
545
546 /* Update BLOCK_FOR_INSN of insns in BB to BB,
547 and notify df of the change. */
548
void
update_bb_for_insn (basic_block bb)
551 {
552 update_bb_for_insn_chain (BB_HEAD (bb), BB_END (bb), bb);
553 }
554
555
556 /* Like active_insn_p, except keep the return value clobber around
557 even after reload. */
558
static bool
flow_active_insn_p (const rtx_insn *insn)
561 {
562 if (active_insn_p (insn))
563 return true;
564
565 /* A clobber of the function return value exists for buggy
566 programs that fail to return a value. Its effect is to
567 keep the return value from being live across the entire
568 function. If we allow it to be skipped, we introduce the
569 possibility for register lifetime confusion. */
570 if (GET_CODE (PATTERN (insn)) == CLOBBER
571 && REG_P (XEXP (PATTERN (insn), 0))
572 && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))
573 return true;
574
575 return false;
576 }
577
578 /* Return true if the block has no effect and only forwards control flow to
579 its single destination. */
580
bool
contains_no_active_insn_p (const_basic_block bb)
583 {
584 rtx_insn *insn;
585
586 if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
587 || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
588 || !single_succ_p (bb)
589 || (single_succ_edge (bb)->flags & EDGE_FAKE) != 0)
590 return false;
591
592 for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
593 if (INSN_P (insn) && flow_active_insn_p (insn))
594 return false;
595
596 return (!INSN_P (insn)
597 || (JUMP_P (insn) && simplejump_p (insn))
598 || !flow_active_insn_p (insn));
599 }
600
601 /* Likewise, but protect loop latches, headers and preheaders. */
602 /* FIXME: Make this a cfg hook. */
603
bool
forwarder_block_p (const_basic_block bb)
606 {
607 if (!contains_no_active_insn_p (bb))
608 return false;
609
610 /* Protect loop latches, headers and preheaders. */
611 if (current_loops)
612 {
613 basic_block dest;
614 if (bb->loop_father->header == bb)
615 return false;
616 dest = EDGE_SUCC (bb, 0)->dest;
617 if (dest->loop_father->header == dest)
618 return false;
619 }
620
621 return true;
622 }
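
/* For illustration, the insn stream of a forwarder block as recognized
   above typically amounts to no more than (schematic RTL):

       (code_label 23 ...)
       (note ... NOTE_INSN_BASIC_BLOCK)
       (jump_insn ... (set (pc) (label_ref 42)) ...)

   i.e. an optional label, the basic block note and a simple unconditional
   jump to the single successor.  */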
623
624 /* Return nonzero if we can reach target from src by falling through. */
625 /* FIXME: Make this a cfg hook, the result is only valid in cfgrtl mode. */
626
bool
can_fallthru (basic_block src, basic_block target)
629 {
630 rtx_insn *insn = BB_END (src);
631 rtx_insn *insn2;
632 edge e;
633 edge_iterator ei;
634
635 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
636 return true;
637 if (src->next_bb != target)
638 return false;
639
640 /* ??? Later we may add code to move jump tables offline. */
641 if (tablejump_p (insn, NULL, NULL))
642 return false;
643
644 FOR_EACH_EDGE (e, ei, src->succs)
645 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
646 && e->flags & EDGE_FALLTHRU)
647 return false;
648
649 insn2 = BB_HEAD (target);
650 if (!active_insn_p (insn2))
651 insn2 = next_active_insn (insn2);
652
653 return next_active_insn (insn) == insn2;
654 }
655
656 /* Return nonzero if we could reach target from src by falling through,
657 if the target was made adjacent. If we already have a fall-through
658 edge to the exit block, we can't do that. */
static bool
could_fall_through (basic_block src, basic_block target)
661 {
662 edge e;
663 edge_iterator ei;
664
665 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
666 return true;
667 FOR_EACH_EDGE (e, ei, src->succs)
668 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
669 && e->flags & EDGE_FALLTHRU)
670 return 0;
671 return true;
672 }
673
674 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
rtx_note *
bb_note (basic_block bb)
677 {
678 rtx_insn *note;
679
680 note = BB_HEAD (bb);
681 if (LABEL_P (note))
682 note = NEXT_INSN (note);
683
684 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
685 return as_a <rtx_note *> (note);
686 }
687
688 /* Return the INSN immediately following the NOTE_INSN_BASIC_BLOCK
689 note associated with the BLOCK. */
690
static rtx_insn *
first_insn_after_basic_block_note (basic_block block)
693 {
694 rtx_insn *insn;
695
696 /* Get the first instruction in the block. */
697 insn = BB_HEAD (block);
698
699 if (insn == NULL_RTX)
700 return NULL;
701 if (LABEL_P (insn))
702 insn = NEXT_INSN (insn);
703 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
704
705 return NEXT_INSN (insn);
706 }
707
708 /* Creates a new basic block just after basic block BB by splitting
709 everything after specified instruction INSNP. */
710
static basic_block
rtl_split_block (basic_block bb, void *insnp)
713 {
714 basic_block new_bb;
715 rtx_insn *insn = (rtx_insn *) insnp;
716 edge e;
717 edge_iterator ei;
718
719 if (!insn)
720 {
721 insn = first_insn_after_basic_block_note (bb);
722
723 if (insn)
724 {
725 rtx_insn *next = insn;
726
727 insn = PREV_INSN (insn);
728
729 /* If the block contains only debug insns, insn would have
730 been NULL in a non-debug compilation, and then we'd end
731 up emitting a DELETED note. For -fcompare-debug
732 stability, emit the note too. */
733 if (insn != BB_END (bb)
734 && DEBUG_INSN_P (next)
735 && DEBUG_INSN_P (BB_END (bb)))
736 {
737 while (next != BB_END (bb) && DEBUG_INSN_P (next))
738 next = NEXT_INSN (next);
739
740 if (next == BB_END (bb))
741 emit_note_after (NOTE_INSN_DELETED, next);
742 }
743 }
744 else
745 insn = get_last_insn ();
746 }
747
748 /* We probably should check type of the insn so that we do not create
749 inconsistent cfg. It is checked in verify_flow_info anyway, so do not
750 bother. */
751 if (insn == BB_END (bb))
752 emit_note_after (NOTE_INSN_DELETED, insn);
753
754 /* Create the new basic block. */
755 new_bb = create_basic_block (NEXT_INSN (insn), BB_END (bb), bb);
756 BB_COPY_PARTITION (new_bb, bb);
757 BB_END (bb) = insn;
758
759 /* Redirect the outgoing edges. */
760 new_bb->succs = bb->succs;
761 bb->succs = NULL;
762 FOR_EACH_EDGE (e, ei, new_bb->succs)
763 e->src = new_bb;
764
765 /* The new block starts off being dirty. */
766 df_set_bb_dirty (bb);
767 return new_bb;
768 }
769
770 /* Return true if the single edge between blocks A and B is the only place
771 in RTL which holds some unique locus. */
772
static bool
unique_locus_on_edge_between_p (basic_block a, basic_block b)
775 {
776 const location_t goto_locus = EDGE_SUCC (a, 0)->goto_locus;
777 rtx_insn *insn, *end;
778
779 if (LOCATION_LOCUS (goto_locus) == UNKNOWN_LOCATION)
780 return false;
781
782 /* First scan block A backward. */
783 insn = BB_END (a);
784 end = PREV_INSN (BB_HEAD (a));
785 while (insn != end && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
786 insn = PREV_INSN (insn);
787
788 if (insn != end && INSN_LOCATION (insn) == goto_locus)
789 return false;
790
791 /* Then scan block B forward. */
792 insn = BB_HEAD (b);
793 if (insn)
794 {
795 end = NEXT_INSN (BB_END (b));
796 while (insn != end && !NONDEBUG_INSN_P (insn))
797 insn = NEXT_INSN (insn);
798
799 if (insn != end && INSN_HAS_LOCATION (insn)
800 && INSN_LOCATION (insn) == goto_locus)
801 return false;
802 }
803
804 return true;
805 }
806
807 /* If the single edge between blocks A and B is the only place in RTL which
808 holds some unique locus, emit a nop with that locus between the blocks. */
809
static void
emit_nop_for_unique_locus_between (basic_block a, basic_block b)
812 {
813 if (!unique_locus_on_edge_between_p (a, b))
814 return;
815
816 BB_END (a) = emit_insn_after_noloc (gen_nop (), BB_END (a), a);
817 INSN_LOCATION (BB_END (a)) = EDGE_SUCC (a, 0)->goto_locus;
818 }
819
820 /* Blocks A and B are to be merged into a single block A. The insns
821 are already contiguous. */
822
static void
rtl_merge_blocks (basic_block a, basic_block b)
825 {
826 /* If B is a forwarder block whose outgoing edge has no location, we'll
827 propagate the locus of the edge between A and B onto it. */
828 const bool forward_edge_locus
829 = (b->flags & BB_FORWARDER_BLOCK) != 0
830 && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION;
831 rtx_insn *b_head = BB_HEAD (b), *b_end = BB_END (b), *a_end = BB_END (a);
832 rtx_insn *del_first = NULL, *del_last = NULL;
833 rtx_insn *b_debug_start = b_end, *b_debug_end = b_end;
834 int b_empty = 0;
835
836 if (dump_file)
837 fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
838 a->index);
839
840 while (DEBUG_INSN_P (b_end))
841 b_end = PREV_INSN (b_debug_start = b_end);
842
843 /* If there was a CODE_LABEL beginning B, delete it. */
844 if (LABEL_P (b_head))
845 {
846 /* Detect basic blocks with nothing but a label. This can happen
847 in particular at the end of a function. */
848 if (b_head == b_end)
849 b_empty = 1;
850
851 del_first = del_last = b_head;
852 b_head = NEXT_INSN (b_head);
853 }
854
855 /* Delete the basic block note and handle blocks containing just that
856 note. */
857 if (NOTE_INSN_BASIC_BLOCK_P (b_head))
858 {
859 if (b_head == b_end)
860 b_empty = 1;
861 if (! del_last)
862 del_first = b_head;
863
864 del_last = b_head;
865 b_head = NEXT_INSN (b_head);
866 }
867
868 /* If there was a jump out of A, delete it. */
869 if (JUMP_P (a_end))
870 {
871 rtx_insn *prev;
872
873 for (prev = PREV_INSN (a_end); ; prev = PREV_INSN (prev))
874 if (!NOTE_P (prev)
875 || NOTE_INSN_BASIC_BLOCK_P (prev)
876 || prev == BB_HEAD (a))
877 break;
878
879 del_first = a_end;
880
881 /* If this was a conditional jump, we need to also delete
882 the insn that set cc0. */
883 if (HAVE_cc0 && only_sets_cc0_p (prev))
884 {
885 rtx_insn *tmp = prev;
886
887 prev = prev_nonnote_insn (prev);
888 if (!prev)
889 prev = BB_HEAD (a);
890 del_first = tmp;
891 }
892
893 a_end = PREV_INSN (del_first);
894 }
895 else if (BARRIER_P (NEXT_INSN (a_end)))
896 del_first = NEXT_INSN (a_end);
897
898 /* Delete everything marked above as well as crap that might be
899 hanging out between the two blocks. */
900 BB_END (a) = a_end;
901 BB_HEAD (b) = b_empty ? NULL : b_head;
902 delete_insn_chain (del_first, del_last, true);
903
904 /* If not optimizing, preserve the locus of the single edge between
905 blocks A and B if necessary by emitting a nop. */
906 if (!optimize
907 && !forward_edge_locus
908 && !DECL_IGNORED_P (current_function_decl))
909 {
910 emit_nop_for_unique_locus_between (a, b);
911 a_end = BB_END (a);
912 }
913
914 /* Reassociate the insns of B with A. */
915 if (!b_empty)
916 {
917 update_bb_for_insn_chain (a_end, b_debug_end, a);
918
919 BB_END (a) = b_debug_end;
920 BB_HEAD (b) = NULL;
921 }
922 else if (b_end != b_debug_end)
923 {
924 /* Move any deleted labels and other notes between the end of A
925 and the debug insns that make up B after the debug insns,
926 bringing the debug insns into A while keeping the notes after
927 the end of A. */
928 if (NEXT_INSN (a_end) != b_debug_start)
929 reorder_insns_nobb (NEXT_INSN (a_end), PREV_INSN (b_debug_start),
930 b_debug_end);
931 update_bb_for_insn_chain (b_debug_start, b_debug_end, a);
932 BB_END (a) = b_debug_end;
933 }
934
935 df_bb_delete (b->index);
936
937 if (forward_edge_locus)
938 EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
939
940 if (dump_file)
941 fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
942 }
943
944
/* Return true when blocks A and B can be merged.  */

static bool
rtl_can_merge_blocks (basic_block a, basic_block b)
949 {
950 /* If we are partitioning hot/cold basic blocks, we don't want to
951 mess up unconditional or indirect jumps that cross between hot
952 and cold sections.
953
954 Basic block partitioning may result in some jumps that appear to
955 be optimizable (or blocks that appear to be mergeable), but which really
956 must be left untouched (they are required to make it safely across
957 partition boundaries). See the comments at the top of
958 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
959
960 if (BB_PARTITION (a) != BB_PARTITION (b))
961 return false;
962
963 /* Protect the loop latches. */
964 if (current_loops && b->loop_father->latch == b)
965 return false;
966
967 /* There must be exactly one edge in between the blocks. */
968 return (single_succ_p (a)
969 && single_succ (a) == b
970 && single_pred_p (b)
971 && a != b
972 /* Must be simple edge. */
973 && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
974 && a->next_bb == b
975 && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
976 && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
977 /* If the jump insn has side effects,
978 we can't kill the edge. */
979 && (!JUMP_P (BB_END (a))
980 || (reload_completed
981 ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
982 }
983
984 /* Return the label in the head of basic block BLOCK. Create one if it doesn't
985 exist. */
986
rtx_code_label *
block_label (basic_block block)
989 {
990 if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
991 return NULL;
992
993 if (!LABEL_P (BB_HEAD (block)))
994 {
995 BB_HEAD (block) = emit_label_before (gen_label_rtx (), BB_HEAD (block));
996 }
997
998 return as_a <rtx_code_label *> (BB_HEAD (block));
999 }
1000
1001 /* Remove all barriers from BB_FOOTER of a BB. */
1002
static void
remove_barriers_from_footer (basic_block bb)
1005 {
1006 rtx_insn *insn = BB_FOOTER (bb);
1007
1008 /* Remove barriers but keep jumptables. */
1009 while (insn)
1010 {
1011 if (BARRIER_P (insn))
1012 {
1013 if (PREV_INSN (insn))
1014 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
1015 else
1016 BB_FOOTER (bb) = NEXT_INSN (insn);
1017 if (NEXT_INSN (insn))
1018 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
1019 }
1020 if (LABEL_P (insn))
1021 return;
1022 insn = NEXT_INSN (insn);
1023 }
1024 }
1025
/* Attempt to perform edge redirection by replacing a possibly complex jump
   instruction with an unconditional jump, or by removing the jump
   completely.  This can apply only if all edges now point to the same
   block.  The parameters and return values are equivalent to
   redirect_edge_and_branch.  */

edge
try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
1033 {
1034 basic_block src = e->src;
1035 rtx_insn *insn = BB_END (src), *kill_from;
1036 rtx set;
1037 int fallthru = 0;
1038
1039 /* If we are partitioning hot/cold basic blocks, we don't want to
1040 mess up unconditional or indirect jumps that cross between hot
1041 and cold sections.
1042
1043 Basic block partitioning may result in some jumps that appear to
1044 be optimizable (or blocks that appear to be mergeable), but which really
1045 must be left untouched (they are required to make it safely across
1046 partition boundaries). See the comments at the top of
1047 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1048
1049 if (BB_PARTITION (src) != BB_PARTITION (target))
1050 return NULL;
1051
1052 /* We can replace or remove a complex jump only when we have exactly
1053 two edges. Also, if we have exactly one outgoing edge, we can
1054 redirect that. */
1055 if (EDGE_COUNT (src->succs) >= 3
1056 /* Verify that all targets will be TARGET. Specifically, the
1057 edge that is not E must also go to TARGET. */
1058 || (EDGE_COUNT (src->succs) == 2
1059 && EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target))
1060 return NULL;
1061
1062 if (!onlyjump_p (insn))
1063 return NULL;
1064 if ((!optimize || reload_completed) && tablejump_p (insn, NULL, NULL))
1065 return NULL;
1066
1067 /* Avoid removing branch with side effects. */
1068 set = single_set (insn);
1069 if (!set || side_effects_p (set))
1070 return NULL;
1071
1072 /* In case we zap a conditional jump, we'll need to kill
1073 the cc0 setter too. */
1074 kill_from = insn;
1075 if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, PATTERN (insn))
1076 && only_sets_cc0_p (PREV_INSN (insn)))
1077 kill_from = PREV_INSN (insn);
1078
1079 /* See if we can create the fallthru edge. */
1080 if (in_cfglayout || can_fallthru (src, target))
1081 {
1082 if (dump_file)
1083 fprintf (dump_file, "Removing jump %i.\n", INSN_UID (insn));
1084 fallthru = 1;
1085
1086 /* Selectively unlink whole insn chain. */
1087 if (in_cfglayout)
1088 {
1089 delete_insn_chain (kill_from, BB_END (src), false);
1090 remove_barriers_from_footer (src);
1091 }
1092 else
1093 delete_insn_chain (kill_from, PREV_INSN (BB_HEAD (target)),
1094 false);
1095 }
1096
1097 /* If this already is simplejump, redirect it. */
1098 else if (simplejump_p (insn))
1099 {
1100 if (e->dest == target)
1101 return NULL;
1102 if (dump_file)
1103 fprintf (dump_file, "Redirecting jump %i from %i to %i.\n",
1104 INSN_UID (insn), e->dest->index, target->index);
1105 if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
1106 block_label (target), 0))
1107 {
1108 gcc_assert (target == EXIT_BLOCK_PTR_FOR_FN (cfun));
1109 return NULL;
1110 }
1111 }
1112
1113 /* Cannot do anything for target exit block. */
1114 else if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
1115 return NULL;
1116
1117 /* Or replace possibly complicated jump insn by simple jump insn. */
1118 else
1119 {
1120 rtx_code_label *target_label = block_label (target);
1121 rtx_insn *barrier;
1122 rtx_insn *label;
1123 rtx_jump_table_data *table;
1124
1125 emit_jump_insn_after_noloc (targetm.gen_jump (target_label), insn);
1126 JUMP_LABEL (BB_END (src)) = target_label;
1127 LABEL_NUSES (target_label)++;
1128 if (dump_file)
1129 fprintf (dump_file, "Replacing insn %i by jump %i\n",
1130 INSN_UID (insn), INSN_UID (BB_END (src)));
1131
1132
1133 delete_insn_chain (kill_from, insn, false);
1134
1135 /* Recognize a tablejump that we are converting to a
1136 simple jump and remove its associated CODE_LABEL
1137 and ADDR_VEC or ADDR_DIFF_VEC. */
1138 if (tablejump_p (insn, &label, &table))
1139 delete_insn_chain (label, table, false);
1140
1141 barrier = next_nonnote_nondebug_insn (BB_END (src));
1142 if (!barrier || !BARRIER_P (barrier))
1143 emit_barrier_after (BB_END (src));
1144 else
1145 {
1146 if (barrier != NEXT_INSN (BB_END (src)))
1147 {
1148 /* Move the jump before barrier so that the notes
1149 which originally were or were created before jump table are
1150 inside the basic block. */
1151 rtx_insn *new_insn = BB_END (src);
1152
1153 update_bb_for_insn_chain (NEXT_INSN (BB_END (src)),
1154 PREV_INSN (barrier), src);
1155
1156 SET_NEXT_INSN (PREV_INSN (new_insn)) = NEXT_INSN (new_insn);
1157 SET_PREV_INSN (NEXT_INSN (new_insn)) = PREV_INSN (new_insn);
1158
1159 SET_NEXT_INSN (new_insn) = barrier;
1160 SET_NEXT_INSN (PREV_INSN (barrier)) = new_insn;
1161
1162 SET_PREV_INSN (new_insn) = PREV_INSN (barrier);
1163 SET_PREV_INSN (barrier) = new_insn;
1164 }
1165 }
1166 }
1167
1168 /* Keep only one edge out and set proper flags. */
1169 if (!single_succ_p (src))
1170 remove_edge (e);
1171 gcc_assert (single_succ_p (src));
1172
1173 e = single_succ_edge (src);
1174 if (fallthru)
1175 e->flags = EDGE_FALLTHRU;
1176 else
1177 e->flags = 0;
1178
1179 e->probability = profile_probability::always ();
1180
1181 if (e->dest != target)
1182 redirect_edge_succ (e, target);
1183 return e;
1184 }
1185
1186 /* Subroutine of redirect_branch_edge that tries to patch the jump
1187 instruction INSN so that it reaches block NEW. Do this
1188 only when it originally reached block OLD. Return true if this
1189 worked or the original target wasn't OLD, return false if redirection
1190 doesn't work. */
1191
static bool
patch_jump_insn (rtx_insn *insn, rtx_insn *old_label, basic_block new_bb)
1194 {
1195 rtx_jump_table_data *table;
1196 rtx tmp;
1197 /* Recognize a tablejump and adjust all matching cases. */
1198 if (tablejump_p (insn, NULL, &table))
1199 {
1200 rtvec vec;
1201 int j;
1202 rtx_code_label *new_label = block_label (new_bb);
1203
1204 if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
1205 return false;
1206 vec = table->get_labels ();
1207
1208 for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j)
1209 if (XEXP (RTVEC_ELT (vec, j), 0) == old_label)
1210 {
1211 RTVEC_ELT (vec, j) = gen_rtx_LABEL_REF (Pmode, new_label);
1212 --LABEL_NUSES (old_label);
1213 ++LABEL_NUSES (new_label);
1214 }
1215
1216 /* Handle casesi dispatch insns. */
1217 if ((tmp = single_set (insn)) != NULL
1218 && SET_DEST (tmp) == pc_rtx
1219 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
1220 && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF
1221 && label_ref_label (XEXP (SET_SRC (tmp), 2)) == old_label)
1222 {
1223 XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (Pmode,
1224 new_label);
1225 --LABEL_NUSES (old_label);
1226 ++LABEL_NUSES (new_label);
1227 }
1228 }
1229 else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
1230 {
1231 int i, n = ASM_OPERANDS_LABEL_LENGTH (tmp);
1232 rtx note;
1233
1234 if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
1235 return false;
1236 rtx_code_label *new_label = block_label (new_bb);
1237
1238 for (i = 0; i < n; ++i)
1239 {
1240 rtx old_ref = ASM_OPERANDS_LABEL (tmp, i);
1241 gcc_assert (GET_CODE (old_ref) == LABEL_REF);
1242 if (XEXP (old_ref, 0) == old_label)
1243 {
1244 ASM_OPERANDS_LABEL (tmp, i)
1245 = gen_rtx_LABEL_REF (Pmode, new_label);
1246 --LABEL_NUSES (old_label);
1247 ++LABEL_NUSES (new_label);
1248 }
1249 }
1250
1251 if (JUMP_LABEL (insn) == old_label)
1252 {
1253 JUMP_LABEL (insn) = new_label;
1254 note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
1255 if (note)
1256 remove_note (insn, note);
1257 }
1258 else
1259 {
1260 note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
1261 if (note)
1262 remove_note (insn, note);
1263 if (JUMP_LABEL (insn) != new_label
1264 && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
1265 add_reg_note (insn, REG_LABEL_TARGET, new_label);
1266 }
1267 while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
1268 != NULL_RTX)
1269 XEXP (note, 0) = new_label;
1270 }
1271 else
1272 {
1273 /* ?? We may play the games with moving the named labels from
1274 one basic block to the other in case only one computed_jump is
1275 available. */
1276 if (computed_jump_p (insn)
1277 /* A return instruction can't be redirected. */
1278 || returnjump_p (insn))
1279 return false;
1280
1281 if (!currently_expanding_to_rtl || JUMP_LABEL (insn) == old_label)
1282 {
1283 /* If the insn doesn't go where we think, we're confused. */
1284 gcc_assert (JUMP_LABEL (insn) == old_label);
1285
1286 /* If the substitution doesn't succeed, die. This can happen
1287 if the back end emitted unrecognizable instructions or if
1288 target is exit block on some arches. Or for crossing
1289 jumps. */
1290 if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
1291 block_label (new_bb), 0))
1292 {
1293 gcc_assert (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
1294 || CROSSING_JUMP_P (insn));
1295 return false;
1296 }
1297 }
1298 }
1299 return true;
1300 }
1301
1302
/* Redirect the edge representing a branch of an (un)conditional jump or
   tablejump; return NULL on failure.  */
static edge
redirect_branch_edge (edge e, basic_block target)
1307 {
1308 rtx_insn *old_label = BB_HEAD (e->dest);
1309 basic_block src = e->src;
1310 rtx_insn *insn = BB_END (src);
1311
1312 /* We can only redirect non-fallthru edges of jump insn. */
1313 if (e->flags & EDGE_FALLTHRU)
1314 return NULL;
1315 else if (!JUMP_P (insn) && !currently_expanding_to_rtl)
1316 return NULL;
1317
1318 if (!currently_expanding_to_rtl)
1319 {
1320 if (!patch_jump_insn (as_a <rtx_jump_insn *> (insn), old_label, target))
1321 return NULL;
1322 }
1323 else
1324 /* When expanding this BB might actually contain multiple
1325 jumps (i.e. not yet split by find_many_sub_basic_blocks).
1326 Redirect all of those that match our label. */
1327 FOR_BB_INSNS (src, insn)
1328 if (JUMP_P (insn) && !patch_jump_insn (as_a <rtx_jump_insn *> (insn),
1329 old_label, target))
1330 return NULL;
1331
1332 if (dump_file)
1333 fprintf (dump_file, "Edge %i->%i redirected to %i\n",
1334 e->src->index, e->dest->index, target->index);
1335
1336 if (e->dest != target)
1337 e = redirect_edge_succ_nodup (e, target);
1338
1339 return e;
1340 }
1341
1342 /* Called when edge E has been redirected to a new destination,
1343 in order to update the region crossing flag on the edge and
1344 jump. */
1345
static void
fixup_partition_crossing (edge e)
1348 {
1349 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || e->dest
1350 == EXIT_BLOCK_PTR_FOR_FN (cfun))
1351 return;
1352 /* If we redirected an existing edge, it may already be marked
1353 crossing, even though the new src is missing a reg crossing note.
1354 But make sure reg crossing note doesn't already exist before
1355 inserting. */
1356 if (BB_PARTITION (e->src) != BB_PARTITION (e->dest))
1357 {
1358 e->flags |= EDGE_CROSSING;
1359 if (JUMP_P (BB_END (e->src)))
1360 CROSSING_JUMP_P (BB_END (e->src)) = 1;
1361 }
1362 else if (BB_PARTITION (e->src) == BB_PARTITION (e->dest))
1363 {
1364 e->flags &= ~EDGE_CROSSING;
1365 /* Remove the section crossing note from jump at end of
1366 src if it exists, and if no other successors are
1367 still crossing. */
1368 if (JUMP_P (BB_END (e->src)) && CROSSING_JUMP_P (BB_END (e->src)))
1369 {
1370 bool has_crossing_succ = false;
1371 edge e2;
1372 edge_iterator ei;
1373 FOR_EACH_EDGE (e2, ei, e->src->succs)
1374 {
1375 has_crossing_succ |= (e2->flags & EDGE_CROSSING);
1376 if (has_crossing_succ)
1377 break;
1378 }
1379 if (!has_crossing_succ)
1380 CROSSING_JUMP_P (BB_END (e->src)) = 0;
1381 }
1382 }
1383 }
1384
1385 /* Called when block BB has been reassigned to the cold partition,
1386 because it is now dominated by another cold block,
1387 to ensure that the region crossing attributes are updated. */
1388
static void
fixup_new_cold_bb (basic_block bb)
1391 {
1392 edge e;
1393 edge_iterator ei;
1394
1395 /* This is called when a hot bb is found to now be dominated
1396 by a cold bb and therefore needs to become cold. Therefore,
1397 its preds will no longer be region crossing. Any non-dominating
1398 preds that were previously hot would also have become cold
1399 in the caller for the same region. Any preds that were previously
1400 region-crossing will be adjusted in fixup_partition_crossing. */
1401 FOR_EACH_EDGE (e, ei, bb->preds)
1402 {
1403 fixup_partition_crossing (e);
1404 }
1405
1406 /* Possibly need to make bb's successor edges region crossing,
1407 or remove stale region crossing. */
1408 FOR_EACH_EDGE (e, ei, bb->succs)
1409 {
1410 /* We can't have fall-through edges across partition boundaries.
1411 Note that force_nonfallthru will do any necessary partition
1412 boundary fixup by calling fixup_partition_crossing itself. */
1413 if ((e->flags & EDGE_FALLTHRU)
1414 && BB_PARTITION (bb) != BB_PARTITION (e->dest)
1415 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1416 force_nonfallthru (e);
1417 else
1418 fixup_partition_crossing (e);
1419 }
1420 }
1421
/* Attempt to change code to redirect edge E to TARGET.  Don't do that at
   the expense of adding new instructions or reordering basic blocks.

   The function can also be called with the edge destination equal to
   TARGET.  In that case it should try the simplifications and do nothing
   if none is possible.

   Return the edge representing the branch if the transformation succeeded.
   Return NULL on failure.  We still return NULL when E already pointed to
   TARGET and we did not manage to simplify the instruction stream.  */

static edge
rtl_redirect_edge_and_branch (edge e, basic_block target)
1435 {
1436 edge ret;
1437 basic_block src = e->src;
1438 basic_block dest = e->dest;
1439
1440 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
1441 return NULL;
1442
1443 if (dest == target)
1444 return e;
1445
1446 if ((ret = try_redirect_by_replacing_jump (e, target, false)) != NULL)
1447 {
1448 df_set_bb_dirty (src);
1449 fixup_partition_crossing (ret);
1450 return ret;
1451 }
1452
1453 ret = redirect_branch_edge (e, target);
1454 if (!ret)
1455 return NULL;
1456
1457 df_set_bb_dirty (src);
1458 fixup_partition_crossing (ret);
1459 return ret;
1460 }
1461
1462 /* Emit a barrier after BB, into the footer if we are in CFGLAYOUT mode. */
1463
void
emit_barrier_after_bb (basic_block bb)
1466 {
1467 rtx_barrier *barrier = emit_barrier_after (BB_END (bb));
1468 gcc_assert (current_ir_type () == IR_RTL_CFGRTL
1469 || current_ir_type () == IR_RTL_CFGLAYOUT);
1470 if (current_ir_type () == IR_RTL_CFGLAYOUT)
1471 {
1472 rtx_insn *insn = unlink_insn_chain (barrier, barrier);
1473
1474 if (BB_FOOTER (bb))
1475 {
1476 rtx_insn *footer_tail = BB_FOOTER (bb);
1477
1478 while (NEXT_INSN (footer_tail))
1479 footer_tail = NEXT_INSN (footer_tail);
1480 if (!BARRIER_P (footer_tail))
1481 {
1482 SET_NEXT_INSN (footer_tail) = insn;
1483 SET_PREV_INSN (insn) = footer_tail;
1484 }
1485 }
1486 else
1487 BB_FOOTER (bb) = insn;
1488 }
1489 }
1490
/* Like force_nonfallthru below, but additionally performs redirection.
   Used by redirect_edge_and_branch_force.  JUMP_LABEL is used only
   when redirecting to the EXIT_BLOCK; it is then either ret_rtx or
   simple_return_rtx, indicating which kind of returnjump to create.
   It should be NULL otherwise.  */

basic_block
force_nonfallthru_and_redirect (edge e, basic_block target, rtx jump_label)
1499 {
1500 basic_block jump_block, new_bb = NULL, src = e->src;
1501 rtx note;
1502 edge new_edge;
1503 int abnormal_edge_flags = 0;
1504 bool asm_goto_edge = false;
1505 int loc;
1506
1507 /* In the case the last instruction is conditional jump to the next
1508 instruction, first redirect the jump itself and then continue
1509 by creating a basic block afterwards to redirect fallthru edge. */
1510 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
1511 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1512 && any_condjump_p (BB_END (e->src))
1513 && JUMP_LABEL (BB_END (e->src)) == BB_HEAD (e->dest))
1514 {
1515 rtx note;
1516 edge b = unchecked_make_edge (e->src, target, 0);
1517 bool redirected;
1518
1519 redirected = redirect_jump (as_a <rtx_jump_insn *> (BB_END (e->src)),
1520 block_label (target), 0);
1521 gcc_assert (redirected);
1522
1523 note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX);
1524 if (note)
1525 {
1526 int prob = XINT (note, 0);
1527
1528 b->probability = profile_probability::from_reg_br_prob_note (prob);
1529 e->probability -= e->probability;
1530 }
1531 }
1532
1533 if (e->flags & EDGE_ABNORMAL)
1534 {
1535 /* Irritating special case - fallthru edge to the same block as abnormal
1536 edge.
1537 We can't redirect abnormal edge, but we still can split the fallthru
1538 one and create separate abnormal edge to original destination.
1539 This allows bb-reorder to make such edge non-fallthru. */
1540 gcc_assert (e->dest == target);
1541 abnormal_edge_flags = e->flags & ~EDGE_FALLTHRU;
1542 e->flags &= EDGE_FALLTHRU;
1543 }
1544 else
1545 {
1546 gcc_assert (e->flags & EDGE_FALLTHRU);
1547 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1548 {
1549 /* We can't redirect the entry block. Create an empty block
1550 at the start of the function which we use to add the new
1551 jump. */
1552 edge tmp;
1553 edge_iterator ei;
1554 bool found = false;
1555
1556 basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL,
1557 ENTRY_BLOCK_PTR_FOR_FN (cfun));
1558 bb->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
1559
1560 /* Make sure new block ends up in correct hot/cold section. */
1561 BB_COPY_PARTITION (bb, e->dest);
1562
1563 /* Change the existing edge's source to be the new block, and add
1564 a new edge from the entry block to the new block. */
1565 e->src = bb;
1566 for (ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
1567 (tmp = ei_safe_edge (ei)); )
1568 {
1569 if (tmp == e)
1570 {
1571 ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs->unordered_remove (ei.index);
1572 found = true;
1573 break;
1574 }
1575 else
1576 ei_next (&ei);
1577 }
1578
1579 gcc_assert (found);
1580
1581 vec_safe_push (bb->succs, e);
1582 make_single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb,
1583 EDGE_FALLTHRU);
1584 }
1585 }
1586
1587 /* If e->src ends with asm goto, see if any of the ASM_OPERANDS_LABELs
1588 don't point to the target or fallthru label. */
1589 if (JUMP_P (BB_END (e->src))
1590 && target != EXIT_BLOCK_PTR_FOR_FN (cfun)
1591 && (e->flags & EDGE_FALLTHRU)
1592 && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
1593 {
1594 int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
1595 bool adjust_jump_target = false;
1596
1597 for (i = 0; i < n; ++i)
1598 {
1599 if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (e->dest))
1600 {
1601 LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))--;
1602 XEXP (ASM_OPERANDS_LABEL (note, i), 0) = block_label (target);
1603 LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))++;
1604 adjust_jump_target = true;
1605 }
1606 if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (target))
1607 asm_goto_edge = true;
1608 }
1609 if (adjust_jump_target)
1610 {
1611 rtx_insn *insn = BB_END (e->src);
1612 rtx note;
1613 rtx_insn *old_label = BB_HEAD (e->dest);
1614 rtx_insn *new_label = BB_HEAD (target);
1615
1616 if (JUMP_LABEL (insn) == old_label)
1617 {
1618 JUMP_LABEL (insn) = new_label;
1619 note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
1620 if (note)
1621 remove_note (insn, note);
1622 }
1623 else
1624 {
1625 note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
1626 if (note)
1627 remove_note (insn, note);
1628 if (JUMP_LABEL (insn) != new_label
1629 && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
1630 add_reg_note (insn, REG_LABEL_TARGET, new_label);
1631 }
1632 while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
1633 != NULL_RTX)
1634 XEXP (note, 0) = new_label;
1635 }
1636 }
1637
1638 if (EDGE_COUNT (e->src->succs) >= 2 || abnormal_edge_flags || asm_goto_edge)
1639 {
1640 rtx_insn *new_head;
1641 profile_count count = e->count ();
1642 profile_probability probability = e->probability;
1643 /* Create the new structures. */
1644
1645 /* If the old block ended with a tablejump, skip its table
1646 by searching forward from there. Otherwise start searching
1647 forward from the last instruction of the old block. */
1648 rtx_jump_table_data *table;
1649 if (tablejump_p (BB_END (e->src), NULL, &table))
1650 new_head = table;
1651 else
1652 new_head = BB_END (e->src);
1653 new_head = NEXT_INSN (new_head);
1654
1655 jump_block = create_basic_block (new_head, NULL, e->src);
1656 jump_block->count = count;
1657
1658 /* Make sure new block ends up in correct hot/cold section. */
1659
1660 BB_COPY_PARTITION (jump_block, e->src);
1661
1662 /* Wire edge in. */
1663 new_edge = make_edge (e->src, jump_block, EDGE_FALLTHRU);
1664 new_edge->probability = probability;
1665
1666 /* Redirect old edge. */
1667 redirect_edge_pred (e, jump_block);
1668 e->probability = profile_probability::always ();
1669
1670 /* If e->src was previously region crossing, it no longer is
1671 and the reg crossing note should be removed. */
1672 fixup_partition_crossing (new_edge);
1673
1674 /* If asm goto has any label refs to target's label,
1675 add also edge from asm goto bb to target. */
1676 if (asm_goto_edge)
1677 {
1678 new_edge->probability = new_edge->probability.apply_scale (1, 2);
1679 jump_block->count = jump_block->count.apply_scale (1, 2);
1680 edge new_edge2 = make_edge (new_edge->src, target,
1681 e->flags & ~EDGE_FALLTHRU);
1682 new_edge2->probability = probability - new_edge->probability;
1683 }
1684
1685 new_bb = jump_block;
1686 }
1687 else
1688 jump_block = e->src;
1689
1690 loc = e->goto_locus;
1691 e->flags &= ~EDGE_FALLTHRU;
1692 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
1693 {
1694 if (jump_label == ret_rtx)
1695 emit_jump_insn_after_setloc (targetm.gen_return (),
1696 BB_END (jump_block), loc);
1697 else
1698 {
1699 gcc_assert (jump_label == simple_return_rtx);
1700 emit_jump_insn_after_setloc (targetm.gen_simple_return (),
1701 BB_END (jump_block), loc);
1702 }
1703 set_return_jump_label (BB_END (jump_block));
1704 }
1705 else
1706 {
1707 rtx_code_label *label = block_label (target);
1708 emit_jump_insn_after_setloc (targetm.gen_jump (label),
1709 BB_END (jump_block), loc);
1710 JUMP_LABEL (BB_END (jump_block)) = label;
1711 LABEL_NUSES (label)++;
1712 }
1713
1714 /* We might be in cfg layout mode, and if so, the following routine will
1715 insert the barrier correctly. */
1716 emit_barrier_after_bb (jump_block);
1717 redirect_edge_succ_nodup (e, target);
1718
1719 if (abnormal_edge_flags)
1720 make_edge (src, target, abnormal_edge_flags);
1721
1722 df_mark_solutions_dirty ();
1723 fixup_partition_crossing (e);
1724 return new_bb;
1725 }
1726
1727 /* Edge E is assumed to be fallthru edge. Emit needed jump instruction
1728 (and possibly create new basic block) to make edge non-fallthru.
1729 Return newly created BB or NULL if none. */
1730
static basic_block
rtl_force_nonfallthru (edge e)
1733 {
1734 return force_nonfallthru_and_redirect (e, e->dest, NULL_RTX);
1735 }
1736
1737 /* Redirect edge even at the expense of creating new jump insn or
1738 basic block. Return new basic block if created, NULL otherwise.
1739 Conversion must be possible. */
1740
static basic_block
rtl_redirect_edge_and_branch_force (edge e, basic_block target)
1743 {
1744 if (redirect_edge_and_branch (e, target)
1745 || e->dest == target)
1746 return NULL;
1747
1748 /* In case the edge redirection failed, try to force it to be non-fallthru
1749 and redirect newly created simplejump. */
1750 df_set_bb_dirty (e->src);
1751 return force_nonfallthru_and_redirect (e, target, NULL_RTX);
1752 }
1753
1754 /* The given edge should potentially be a fallthru edge. If that is in
1755 fact true, delete the jump and barriers that are in the way. */
1756
static void
rtl_tidy_fallthru_edge (edge e)
1759 {
1760 rtx_insn *q;
1761 basic_block b = e->src, c = b->next_bb;
1762
1763 /* ??? In a late-running flow pass, other folks may have deleted basic
1764 blocks by nopping out blocks, leaving multiple BARRIERs between here
1765 and the target label. They ought to be chastised and fixed.
1766
1767 We can also wind up with a sequence of undeletable labels between
1768 one block and the next.
1769
1770 So search through a sequence of barriers, labels, and notes for
1771 the head of block C and assert that we really do fall through. */
1772
1773 for (q = NEXT_INSN (BB_END (b)); q != BB_HEAD (c); q = NEXT_INSN (q))
1774 if (NONDEBUG_INSN_P (q))
1775 return;
1776
1777 /* Remove what will soon cease being the jump insn from the source block.
1778 If block B consisted only of this single jump, turn it into a deleted
1779 note. */
1780 q = BB_END (b);
1781 if (JUMP_P (q)
1782 && onlyjump_p (q)
1783 && (any_uncondjump_p (q)
1784 || single_succ_p (b)))
1785 {
1786 rtx_insn *label;
1787 rtx_jump_table_data *table;
1788
1789 if (tablejump_p (q, &label, &table))
1790 {
1791 /* The label is likely mentioned in some instruction before
1792 the tablejump and might not be DCEd, so turn it into
1793 a note instead and move before the tablejump that is going to
1794 be deleted. */
1795 const char *name = LABEL_NAME (label);
1796 PUT_CODE (label, NOTE);
1797 NOTE_KIND (label) = NOTE_INSN_DELETED_LABEL;
1798 NOTE_DELETED_LABEL_NAME (label) = name;
1799 reorder_insns (label, label, PREV_INSN (q));
1800 delete_insn (table);
1801 }
1802
1803 /* If this was a conditional jump, we need to also delete
1804 the insn that set cc0. */
1805 if (HAVE_cc0 && any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q)))
1806 q = PREV_INSN (q);
1807
1808 q = PREV_INSN (q);
1809 }
1810 /* Unconditional jumps with side-effects (i.e. which we can't just delete
1811 together with the barrier) should never have a fallthru edge. */
1812 else if (JUMP_P (q) && any_uncondjump_p (q))
1813 return;
1814
1815 /* Selectively unlink the sequence. */
1816 if (q != PREV_INSN (BB_HEAD (c)))
1817 delete_insn_chain (NEXT_INSN (q), PREV_INSN (BB_HEAD (c)), false);
1818
1819 e->flags |= EDGE_FALLTHRU;
1820 }
1821
/* Should move basic block BB after basic block AFTER.  Not implemented yet (NIY). */
1823
1824 static bool
rtl_move_block_after (basic_block bb ATTRIBUTE_UNUSED,
                      basic_block after ATTRIBUTE_UNUSED)
1827 {
1828 return false;
1829 }
1830
1831 /* Locate the last bb in the same partition as START_BB. */
1832
1833 static basic_block
last_bb_in_partition (basic_block start_bb)
1835 {
1836 basic_block bb;
1837 FOR_BB_BETWEEN (bb, start_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
1838 {
1839 if (BB_PARTITION (start_bb) != BB_PARTITION (bb->next_bb))
1840 return bb;
1841 }
1842 /* Return bb before the exit block. */
1843 return bb->prev_bb;
1844 }
1845
1846 /* Split a (typically critical) edge. Return the new block.
1847 The edge must not be abnormal.
1848
1849 ??? The code generally expects to be called on critical edges.
1850 The case of a block ending in an unconditional jump to a
1851 block with multiple predecessors is not handled optimally. */
1852
1853 static basic_block
rtl_split_edge (edge edge_in)
1855 {
1856 basic_block bb, new_bb;
1857 rtx_insn *before;
1858
1859 /* Abnormal edges cannot be split. */
1860 gcc_assert (!(edge_in->flags & EDGE_ABNORMAL));
1861
1862 /* We are going to place the new block in front of edge destination.
1863 Avoid existence of fallthru predecessors. */
1864 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1865 {
1866 edge e = find_fallthru_edge (edge_in->dest->preds);
1867
1868 if (e)
1869 force_nonfallthru (e);
1870 }
1871
1872 /* Create the basic block note. */
1873 if (edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1874 before = BB_HEAD (edge_in->dest);
1875 else
1876 before = NULL;
1877
/* If this is a fall through edge to the exit block, the blocks might
   not be adjacent, and the right place is after the source. */
1880 if ((edge_in->flags & EDGE_FALLTHRU)
1881 && edge_in->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1882 {
1883 before = NEXT_INSN (BB_END (edge_in->src));
1884 bb = create_basic_block (before, NULL, edge_in->src);
1885 BB_COPY_PARTITION (bb, edge_in->src);
1886 }
1887 else
1888 {
1889 if (edge_in->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1890 {
1891 bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
1892 BB_COPY_PARTITION (bb, edge_in->dest);
1893 }
1894 else
1895 {
1896 basic_block after = edge_in->dest->prev_bb;
1897 /* If this is post-bb reordering, and the edge crosses a partition
1898 boundary, the new block needs to be inserted in the bb chain
1899 at the end of the src partition (since we put the new bb into
1900 that partition, see below). Otherwise we may end up creating
1901 an extra partition crossing in the chain, which is illegal.
1902 It can't go after the src, because src may have a fall-through
1903 to a different block. */
1904 if (crtl->bb_reorder_complete
1905 && (edge_in->flags & EDGE_CROSSING))
1906 {
1907 after = last_bb_in_partition (edge_in->src);
1908 before = get_last_bb_insn (after);
1909 /* The instruction following the last bb in partition should
1910 be a barrier, since it cannot end in a fall-through. */
1911 gcc_checking_assert (BARRIER_P (before));
1912 before = NEXT_INSN (before);
1913 }
1914 bb = create_basic_block (before, NULL, after);
1915 /* Put the split bb into the src partition, to avoid creating
1916 a situation where a cold bb dominates a hot bb, in the case
1917 where src is cold and dest is hot. The src will dominate
1918 the new bb (whereas it might not have dominated dest). */
1919 BB_COPY_PARTITION (bb, edge_in->src);
1920 }
1921 }
1922
1923 make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU);
1924
1925 /* Can't allow a region crossing edge to be fallthrough. */
1926 if (BB_PARTITION (bb) != BB_PARTITION (edge_in->dest)
1927 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1928 {
1929 new_bb = force_nonfallthru (single_succ_edge (bb));
1930 gcc_assert (!new_bb);
1931 }
1932
1933 /* For non-fallthru edges, we must adjust the predecessor's
1934 jump instruction to target our new block. */
1935 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1936 {
1937 edge redirected = redirect_edge_and_branch (edge_in, bb);
1938 gcc_assert (redirected);
1939 }
1940 else
1941 {
1942 if (edge_in->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
1943 {
1944 /* For asm goto even splitting of fallthru edge might
1945 need insn patching, as other labels might point to the
1946 old label. */
1947 rtx_insn *last = BB_END (edge_in->src);
1948 if (last
1949 && JUMP_P (last)
1950 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1951 && (extract_asm_operands (PATTERN (last))
1952 || JUMP_LABEL (last) == before)
1953 && patch_jump_insn (last, before, bb))
1954 df_set_bb_dirty (edge_in->src);
1955 }
1956 redirect_edge_succ (edge_in, bb);
1957 }
1958
1959 return bb;
1960 }
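
/* Note: this is the RTL implementation of the split_edge hook.  Most
   callers go through split_edge () from cfghooks.h, which dispatches to
   this function and generally takes care of updating profile counts and
   loop structures for the new block.  */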
1961
1962 /* Queue instructions for insertion on an edge between two basic blocks.
1963 The new instructions and basic blocks (if any) will not appear in the
1964 CFG until commit_edge_insertions is called. */
1965
1966 void
insert_insn_on_edge (rtx pattern, edge e)
1968 {
1969 /* We cannot insert instructions on an abnormal critical edge.
1970 It will be easier to find the culprit if we die now. */
1971 gcc_assert (!((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)));
1972
1973 if (e->insns.r == NULL_RTX)
1974 start_sequence ();
1975 else
1976 push_to_sequence (e->insns.r);
1977
1978 emit_insn (pattern);
1979
1980 e->insns.r = get_insns ();
1981 end_sequence ();
1982 }
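
/* Illustrative usage sketch (not from this file; DEST_REG and SRC_REG
   are hypothetical operands): a pass that wants a register copy
   performed whenever control passes along edge E would queue it and
   commit later, e.g.

     insert_insn_on_edge (gen_move_insn (dest_reg, src_reg), e);
     ...
     commit_edge_insertions ();

   The queued insns only become part of the insn stream -- possibly in a
   newly created basic block on the split edge -- once
   commit_edge_insertions is called.  */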
1983
1984 /* Update the CFG for the instructions queued on edge E. */
1985
1986 void
commit_one_edge_insertion (edge e)
1988 {
1989 rtx_insn *before = NULL, *after = NULL, *insns, *tmp, *last;
1990 basic_block bb;
1991
1992 /* Pull the insns off the edge now since the edge might go away. */
1993 insns = e->insns.r;
1994 e->insns.r = NULL;
1995
1996 /* Figure out where to put these insns. If the destination has
1997 one predecessor, insert there. Except for the exit block. */
1998 if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1999 {
2000 bb = e->dest;
2001
2002 /* Get the location correct wrt a code label, and "nice" wrt
2003 a basic block note, and before everything else. */
2004 tmp = BB_HEAD (bb);
2005 if (LABEL_P (tmp))
2006 tmp = NEXT_INSN (tmp);
2007 if (NOTE_INSN_BASIC_BLOCK_P (tmp))
2008 tmp = NEXT_INSN (tmp);
2009 if (tmp == BB_HEAD (bb))
2010 before = tmp;
2011 else if (tmp)
2012 after = PREV_INSN (tmp);
2013 else
2014 after = get_last_insn ();
2015 }
2016
2017 /* If the source has one successor and the edge is not abnormal,
2018 insert there. Except for the entry block.
2019 Don't do this if the predecessor ends in a jump other than
2020 unconditional simple jump. E.g. for asm goto that points all
2021 its labels at the fallthru basic block, we can't insert instructions
before the asm goto, as the asm goto can have various side effects,
2023 and can't emit instructions after the asm goto, as it must end
2024 the basic block. */
2025 else if ((e->flags & EDGE_ABNORMAL) == 0
2026 && single_succ_p (e->src)
2027 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2028 && (!JUMP_P (BB_END (e->src))
2029 || simplejump_p (BB_END (e->src))))
2030 {
2031 bb = e->src;
2032
2033 /* It is possible to have a non-simple jump here. Consider a target
2034 where some forms of unconditional jumps clobber a register. This
2035 happens on the fr30 for example.
2036
2037 We know this block has a single successor, so we can just emit
2038 the queued insns before the jump. */
2039 if (JUMP_P (BB_END (bb)))
2040 before = BB_END (bb);
2041 else
2042 {
2043 /* We'd better be fallthru, or we've lost track of what's what. */
2044 gcc_assert (e->flags & EDGE_FALLTHRU);
2045
2046 after = BB_END (bb);
2047 }
2048 }
2049
2050 /* Otherwise we must split the edge. */
2051 else
2052 {
2053 bb = split_edge (e);
2054
2055 /* If E crossed a partition boundary, we needed to make bb end in
2056 a region-crossing jump, even though it was originally fallthru. */
2057 if (JUMP_P (BB_END (bb)))
2058 before = BB_END (bb);
2059 else
2060 after = BB_END (bb);
2061 }
2062
2063 /* Now that we've found the spot, do the insertion. */
2064 if (before)
2065 {
2066 emit_insn_before_noloc (insns, before, bb);
2067 last = prev_nonnote_insn (before);
2068 }
2069 else
2070 last = emit_insn_after_noloc (insns, after, bb);
2071
2072 if (returnjump_p (last))
2073 {
2074 /* ??? Remove all outgoing edges from BB and add one for EXIT.
2075 This is not currently a problem because this only happens
2076 for the (single) epilogue, which already has a fallthru edge
2077 to EXIT. */
2078
2079 e = single_succ_edge (bb);
2080 gcc_assert (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
2081 && single_succ_p (bb) && (e->flags & EDGE_FALLTHRU));
2082
2083 e->flags &= ~EDGE_FALLTHRU;
2084 emit_barrier_after (last);
2085
2086 if (before)
2087 delete_insn (before);
2088 }
2089 else
2090 gcc_assert (!JUMP_P (last));
2091 }
2092
2093 /* Update the CFG for all queued instructions. */
2094
2095 void
commit_edge_insertions (void)
2097 {
2098 basic_block bb;
2099
2100 /* Optimization passes that invoke this routine can cause hot blocks
2101 previously reached by both hot and cold blocks to become dominated only
2102 by cold blocks. This will cause the verification below to fail,
2103 and lead to now cold code in the hot section. In some cases this
2104 may only be visible after newly unreachable blocks are deleted,
2105 which will be done by fixup_partitions. */
2106 fixup_partitions ();
2107
2108 checking_verify_flow_info ();
2109
2110 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
2111 EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
2112 {
2113 edge e;
2114 edge_iterator ei;
2115
2116 FOR_EACH_EDGE (e, ei, bb->succs)
2117 if (e->insns.r)
2118 commit_one_edge_insertion (e);
2119 }
2120 }
2121
2122
2123 /* Print out RTL-specific basic block information (live information
2124 at start and end with TDF_DETAILS). FLAGS are the TDF_* masks
2125 documented in dumpfile.h. */
2126
2127 static void
rtl_dump_bb (FILE *outf, basic_block bb, int indent, dump_flags_t flags)
2129 {
2130 char *s_indent;
2131
2132 s_indent = (char *) alloca ((size_t) indent + 1);
2133 memset (s_indent, ' ', (size_t) indent);
2134 s_indent[indent] = '\0';
2135
2136 if (df && (flags & TDF_DETAILS))
2137 {
2138 df_dump_top (bb, outf);
2139 putc ('\n', outf);
2140 }
2141
2142 if (bb->index != ENTRY_BLOCK && bb->index != EXIT_BLOCK)
2143 {
2144 rtx_insn *last = BB_END (bb);
2145 if (last)
2146 last = NEXT_INSN (last);
2147 for (rtx_insn *insn = BB_HEAD (bb); insn != last; insn = NEXT_INSN (insn))
2148 {
2149 if (flags & TDF_DETAILS)
2150 df_dump_insn_top (insn, outf);
2151 if (! (flags & TDF_SLIM))
2152 print_rtl_single (outf, insn);
2153 else
2154 dump_insn_slim (outf, insn);
2155 if (flags & TDF_DETAILS)
2156 df_dump_insn_bottom (insn, outf);
2157 }
2158 }
2159
2160 if (df && (flags & TDF_DETAILS))
2161 {
2162 df_dump_bottom (bb, outf);
2163 putc ('\n', outf);
2164 }
2165
2166 }
2167
2168 /* Like dump_function_to_file, but for RTL. Print out dataflow information
2169 for the start of each basic block. FLAGS are the TDF_* masks documented
2170 in dumpfile.h. */
2171
2172 void
print_rtl_with_bb (FILE *outf, const rtx_insn *rtx_first, dump_flags_t flags)
2174 {
2175 const rtx_insn *tmp_rtx;
2176 if (rtx_first == 0)
2177 fprintf (outf, "(nil)\n");
2178 else
2179 {
2180 enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
2181 int max_uid = get_max_uid ();
2182 basic_block *start = XCNEWVEC (basic_block, max_uid);
2183 basic_block *end = XCNEWVEC (basic_block, max_uid);
2184 enum bb_state *in_bb_p = XCNEWVEC (enum bb_state, max_uid);
2185 basic_block bb;
2186
2187 /* After freeing the CFG, we still have BLOCK_FOR_INSN set on most
2188 insns, but the CFG is not maintained so the basic block info
2189 is not reliable. Therefore it's omitted from the dumps. */
2190 if (! (cfun->curr_properties & PROP_cfg))
2191 flags &= ~TDF_BLOCKS;
2192
2193 if (df)
2194 df_dump_start (outf);
2195
2196 if (flags & TDF_BLOCKS)
2197 {
2198 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2199 {
2200 rtx_insn *x;
2201
2202 start[INSN_UID (BB_HEAD (bb))] = bb;
2203 end[INSN_UID (BB_END (bb))] = bb;
2204 for (x = BB_HEAD (bb); x != NULL_RTX; x = NEXT_INSN (x))
2205 {
2206 enum bb_state state = IN_MULTIPLE_BB;
2207
2208 if (in_bb_p[INSN_UID (x)] == NOT_IN_BB)
2209 state = IN_ONE_BB;
2210 in_bb_p[INSN_UID (x)] = state;
2211
2212 if (x == BB_END (bb))
2213 break;
2214 }
2215 }
2216 }
2217
2218 for (tmp_rtx = rtx_first; tmp_rtx != NULL; tmp_rtx = NEXT_INSN (tmp_rtx))
2219 {
2220 if (flags & TDF_BLOCKS)
2221 {
2222 bb = start[INSN_UID (tmp_rtx)];
2223 if (bb != NULL)
2224 {
2225 dump_bb_info (outf, bb, 0, dump_flags, true, false);
2226 if (df && (flags & TDF_DETAILS))
2227 df_dump_top (bb, outf);
2228 }
2229
2230 if (in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB
2231 && !NOTE_P (tmp_rtx)
2232 && !BARRIER_P (tmp_rtx))
2233 fprintf (outf, ";; Insn is not within a basic block\n");
2234 else if (in_bb_p[INSN_UID (tmp_rtx)] == IN_MULTIPLE_BB)
2235 fprintf (outf, ";; Insn is in multiple basic blocks\n");
2236 }
2237
2238 if (flags & TDF_DETAILS)
2239 df_dump_insn_top (tmp_rtx, outf);
2240 if (! (flags & TDF_SLIM))
2241 print_rtl_single (outf, tmp_rtx);
2242 else
2243 dump_insn_slim (outf, tmp_rtx);
2244 if (flags & TDF_DETAILS)
2245 df_dump_insn_bottom (tmp_rtx, outf);
2246
2247 if (flags & TDF_BLOCKS)
2248 {
2249 bb = end[INSN_UID (tmp_rtx)];
2250 if (bb != NULL)
2251 {
2252 dump_bb_info (outf, bb, 0, dump_flags, false, true);
2253 if (df && (flags & TDF_DETAILS))
2254 df_dump_bottom (bb, outf);
2255 putc ('\n', outf);
2256 }
2257 }
2258 }
2259
2260 free (start);
2261 free (end);
2262 free (in_bb_p);
2263 }
2264 }
2265
2266 /* Update the branch probability of BB if a REG_BR_PROB is present. */
2267
2268 void
update_br_prob_note (basic_block bb)
2270 {
2271 rtx note;
2272 note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX);
2273 if (!JUMP_P (BB_END (bb)) || !BRANCH_EDGE (bb)->probability.initialized_p ())
2274 {
2275 if (note)
2276 {
2277 rtx *note_link, this_rtx;
2278
note_link = &REG_NOTES (BB_END (bb));
2280 for (this_rtx = *note_link; this_rtx; this_rtx = XEXP (this_rtx, 1))
2281 if (this_rtx == note)
2282 {
2283 *note_link = XEXP (this_rtx, 1);
2284 break;
2285 }
2286 }
2287 return;
2288 }
2289 if (!note
2290 || XINT (note, 0) == BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ())
2291 return;
2292 XINT (note, 0) = BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ();
2293 }
2294
2295 /* Get the last insn associated with block BB (that includes barriers and
2296 tablejumps after BB). */
2297 rtx_insn *
get_last_bb_insn (basic_block bb)
2299 {
2300 rtx_jump_table_data *table;
2301 rtx_insn *tmp;
2302 rtx_insn *end = BB_END (bb);
2303
2304 /* Include any jump table following the basic block. */
2305 if (tablejump_p (end, NULL, &table))
2306 end = table;
2307
2308 /* Include any barriers that may follow the basic block. */
2309 tmp = next_nonnote_nondebug_insn_bb (end);
2310 while (tmp && BARRIER_P (tmp))
2311 {
2312 end = tmp;
2313 tmp = next_nonnote_nondebug_insn_bb (end);
2314 }
2315
2316 return end;
2317 }
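
/* For example, rtl_delete_block (earlier in this file) uses
   get_last_bb_insn to find the real end of the insn chain belonging to a
   block, so that a trailing jump table and barriers are removed together
   with the block.  */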
2318
2319 /* Add all BBs reachable from entry via hot paths into the SET. */
2320
2321 void
find_bbs_reachable_by_hot_paths (hash_set<basic_block> *set)
2323 {
2324 auto_vec<basic_block, 64> worklist;
2325
2326 set->add (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2327 worklist.safe_push (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2328
2329 while (worklist.length () > 0)
2330 {
2331 basic_block bb = worklist.pop ();
2332 edge_iterator ei;
2333 edge e;
2334
2335 FOR_EACH_EDGE (e, ei, bb->succs)
2336 if (BB_PARTITION (e->dest) != BB_COLD_PARTITION
2337 && !set->add (e->dest))
2338 worklist.safe_push (e->dest);
2339 }
2340 }
2341
2342 /* Sanity check partition hotness to ensure that basic blocks in
2343 the cold partition don't dominate basic blocks in the hot partition.
2344 If FLAG_ONLY is true, report violations as errors. Otherwise
2345 re-mark the dominated blocks as cold, since this is run after
2346 cfg optimizations that may make hot blocks previously reached
2347 by both hot and cold blocks now only reachable along cold paths. */
2348
2349 static vec<basic_block>
find_partition_fixes (bool flag_only)
2351 {
2352 basic_block bb;
2353 vec<basic_block> bbs_in_cold_partition = vNULL;
2354 vec<basic_block> bbs_to_fix = vNULL;
2355 hash_set<basic_block> set;
2356
2357 /* Callers check this. */
2358 gcc_checking_assert (crtl->has_bb_partition);
2359
2360 find_bbs_reachable_by_hot_paths (&set);
2361
2362 FOR_EACH_BB_FN (bb, cfun)
2363 if (!set.contains (bb)
2364 && BB_PARTITION (bb) != BB_COLD_PARTITION)
2365 {
2366 if (flag_only)
2367 error ("non-cold basic block %d reachable only "
2368 "by paths crossing the cold partition", bb->index);
2369 else
2370 BB_SET_PARTITION (bb, BB_COLD_PARTITION);
2371 bbs_to_fix.safe_push (bb);
2372 bbs_in_cold_partition.safe_push (bb);
2373 }
2374
2375 return bbs_to_fix;
2376 }
2377
2378 /* Perform cleanup on the hot/cold bb partitioning after optimization
2379 passes that modify the cfg. */
2380
2381 void
fixup_partitions (void)
2383 {
2384 basic_block bb;
2385
2386 if (!crtl->has_bb_partition)
2387 return;
2388
2389 /* Delete any blocks that became unreachable and weren't
2390 already cleaned up, for example during edge forwarding
2391 and convert_jumps_to_returns. This will expose more
2392 opportunities for fixing the partition boundaries here.
2393 Also, the calculation of the dominance graph during verification
2394 will assert if there are unreachable nodes. */
2395 delete_unreachable_blocks ();
2396
2397 /* If there are partitions, do a sanity check on them: A basic block in
2398 a cold partition cannot dominate a basic block in a hot partition.
2399 Fixup any that now violate this requirement, as a result of edge
2400 forwarding and unreachable block deletion. */
2401 vec<basic_block> bbs_to_fix = find_partition_fixes (false);
2402
2403 /* Do the partition fixup after all necessary blocks have been converted to
cold, so that we only update the region crossings in the minimum number of
2405 places, which can require forcing edges to be non fallthru. */
2406 while (! bbs_to_fix.is_empty ())
2407 {
2408 bb = bbs_to_fix.pop ();
2409 fixup_new_cold_bb (bb);
2410 }
2411 }
2412
2413 /* Verify, in the basic block chain, that there is at most one switch
2414 between hot/cold partitions. This condition will not be true until
2415 after reorder_basic_blocks is called. */
2416
2417 static int
verify_hot_cold_block_grouping (void)
2419 {
2420 basic_block bb;
2421 int err = 0;
2422 bool switched_sections = false;
2423 int current_partition = BB_UNPARTITIONED;
2424
2425 /* Even after bb reordering is complete, we go into cfglayout mode
2426 again (in compgoto). Ensure we don't call this before going back
2427 into linearized RTL when any layout fixes would have been committed. */
2428 if (!crtl->bb_reorder_complete
2429 || current_ir_type () != IR_RTL_CFGRTL)
2430 return err;
2431
2432 FOR_EACH_BB_FN (bb, cfun)
2433 {
2434 if (current_partition != BB_UNPARTITIONED
2435 && BB_PARTITION (bb) != current_partition)
2436 {
2437 if (switched_sections)
2438 {
2439 error ("multiple hot/cold transitions found (bb %i)",
2440 bb->index);
2441 err = 1;
2442 }
2443 else
2444 switched_sections = true;
2445
2446 if (!crtl->has_bb_partition)
2447 error ("partition found but function partition flag not set");
2448 }
2449 current_partition = BB_PARTITION (bb);
2450 }
2451
2452 return err;
2453 }
2454
2455
2456 /* Perform several checks on the edges out of each block, such as
2457 the consistency of the branch probabilities, the correctness
2458 of hot/cold partition crossing edges, and the number of expected
2459 successor edges. Also verify that the dominance relationship
2460 between hot/cold blocks is sane. */
2461
2462 static int
rtl_verify_edges (void)
2464 {
2465 int err = 0;
2466 basic_block bb;
2467
2468 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2469 {
2470 int n_fallthru = 0, n_branch = 0, n_abnormal_call = 0, n_sibcall = 0;
2471 int n_eh = 0, n_abnormal = 0;
2472 edge e, fallthru = NULL;
2473 edge_iterator ei;
2474 rtx note;
2475 bool has_crossing_edge = false;
2476
2477 if (JUMP_P (BB_END (bb))
2478 && (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX))
2479 && EDGE_COUNT (bb->succs) >= 2
2480 && any_condjump_p (BB_END (bb)))
2481 {
2482 if (!BRANCH_EDGE (bb)->probability.initialized_p ())
2483 {
2484 if (profile_status_for_fn (cfun) != PROFILE_ABSENT)
2485 {
2486 error ("verify_flow_info: "
2487 "REG_BR_PROB is set but cfg probability is not");
2488 err = 1;
2489 }
2490 }
2491 else if (XINT (note, 0)
2492 != BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ()
2493 && profile_status_for_fn (cfun) != PROFILE_ABSENT)
2494 {
2495 error ("verify_flow_info: REG_BR_PROB does not match cfg %i %i",
2496 XINT (note, 0),
2497 BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ());
2498 err = 1;
2499 }
2500 }
2501
2502 FOR_EACH_EDGE (e, ei, bb->succs)
2503 {
2504 bool is_crossing;
2505
2506 if (e->flags & EDGE_FALLTHRU)
2507 n_fallthru++, fallthru = e;
2508
2509 is_crossing = (BB_PARTITION (e->src) != BB_PARTITION (e->dest)
2510 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2511 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun));
2512 has_crossing_edge |= is_crossing;
2513 if (e->flags & EDGE_CROSSING)
2514 {
2515 if (!is_crossing)
2516 {
2517 error ("EDGE_CROSSING incorrectly set across same section");
2518 err = 1;
2519 }
2520 if (e->flags & EDGE_FALLTHRU)
2521 {
2522 error ("fallthru edge crosses section boundary in bb %i",
2523 e->src->index);
2524 err = 1;
2525 }
2526 if (e->flags & EDGE_EH)
2527 {
2528 error ("EH edge crosses section boundary in bb %i",
2529 e->src->index);
2530 err = 1;
2531 }
2532 if (JUMP_P (BB_END (bb)) && !CROSSING_JUMP_P (BB_END (bb)))
2533 {
2534 error ("No region crossing jump at section boundary in bb %i",
2535 bb->index);
2536 err = 1;
2537 }
2538 }
2539 else if (is_crossing)
2540 {
2541 error ("EDGE_CROSSING missing across section boundary");
2542 err = 1;
2543 }
2544
2545 if ((e->flags & ~(EDGE_DFS_BACK
2546 | EDGE_CAN_FALLTHRU
2547 | EDGE_IRREDUCIBLE_LOOP
2548 | EDGE_LOOP_EXIT
2549 | EDGE_CROSSING
2550 | EDGE_PRESERVE)) == 0)
2551 n_branch++;
2552
2553 if (e->flags & EDGE_ABNORMAL_CALL)
2554 n_abnormal_call++;
2555
2556 if (e->flags & EDGE_SIBCALL)
2557 n_sibcall++;
2558
2559 if (e->flags & EDGE_EH)
2560 n_eh++;
2561
2562 if (e->flags & EDGE_ABNORMAL)
2563 n_abnormal++;
2564 }
2565
2566 if (!has_crossing_edge
2567 && JUMP_P (BB_END (bb))
2568 && CROSSING_JUMP_P (BB_END (bb)))
2569 {
2570 print_rtl_with_bb (stderr, get_insns (), TDF_BLOCKS | TDF_DETAILS);
2571 error ("Region crossing jump across same section in bb %i",
2572 bb->index);
2573 err = 1;
2574 }
2575
2576 if (n_eh && !find_reg_note (BB_END (bb), REG_EH_REGION, NULL_RTX))
2577 {
2578 error ("missing REG_EH_REGION note at the end of bb %i", bb->index);
2579 err = 1;
2580 }
2581 if (n_eh > 1)
2582 {
2583 error ("too many exception handling edges in bb %i", bb->index);
2584 err = 1;
2585 }
2586 if (n_branch
2587 && (!JUMP_P (BB_END (bb))
2588 || (n_branch > 1 && (any_uncondjump_p (BB_END (bb))
2589 || any_condjump_p (BB_END (bb))))))
2590 {
2591 error ("too many outgoing branch edges from bb %i", bb->index);
2592 err = 1;
2593 }
2594 if (n_fallthru && any_uncondjump_p (BB_END (bb)))
2595 {
2596 error ("fallthru edge after unconditional jump in bb %i", bb->index);
2597 err = 1;
2598 }
2599 if (n_branch != 1 && any_uncondjump_p (BB_END (bb)))
2600 {
2601 error ("wrong number of branch edges after unconditional jump"
2602 " in bb %i", bb->index);
2603 err = 1;
2604 }
2605 if (n_branch != 1 && any_condjump_p (BB_END (bb))
2606 && JUMP_LABEL (BB_END (bb)) != BB_HEAD (fallthru->dest))
2607 {
error ("wrong number of branch edges after conditional jump"
2609 " in bb %i", bb->index);
2610 err = 1;
2611 }
2612 if (n_abnormal_call && !CALL_P (BB_END (bb)))
2613 {
2614 error ("abnormal call edges for non-call insn in bb %i", bb->index);
2615 err = 1;
2616 }
2617 if (n_sibcall && !CALL_P (BB_END (bb)))
2618 {
2619 error ("sibcall edges for non-call insn in bb %i", bb->index);
2620 err = 1;
2621 }
2622 if (n_abnormal > n_eh
2623 && !(CALL_P (BB_END (bb))
2624 && n_abnormal == n_abnormal_call + n_sibcall)
2625 && (!JUMP_P (BB_END (bb))
2626 || any_condjump_p (BB_END (bb))
2627 || any_uncondjump_p (BB_END (bb))))
2628 {
2629 error ("abnormal edges for no purpose in bb %i", bb->index);
2630 err = 1;
2631 }
2632
2633 int has_eh = -1;
2634 FOR_EACH_EDGE (e, ei, bb->preds)
2635 {
2636 if (has_eh == -1)
2637 has_eh = (e->flags & EDGE_EH);
2638 if ((e->flags & EDGE_EH) == has_eh)
2639 continue;
2640 error ("EH incoming edge mixed with non-EH incoming edges "
2641 "in bb %i", bb->index);
2642 err = 1;
2643 break;
2644 }
2645 }
2646
2647 /* If there are partitions, do a sanity check on them: A basic block in
2648 a cold partition cannot dominate a basic block in a hot partition. */
2649 if (crtl->has_bb_partition && !err
2650 && current_ir_type () == IR_RTL_CFGLAYOUT)
2651 {
2652 vec<basic_block> bbs_to_fix = find_partition_fixes (true);
2653 err = !bbs_to_fix.is_empty ();
2654 }
2655
2656 /* Clean up. */
2657 return err;
2658 }
2659
2660 /* Checks on the instructions within blocks. Currently checks that each
2661 block starts with a basic block note, and that basic block notes and
2662 control flow jumps are not found in the middle of the block. */
2663
2664 static int
rtl_verify_bb_insns (void)
2666 {
2667 rtx_insn *x;
2668 int err = 0;
2669 basic_block bb;
2670
2671 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2672 {
/* Now check the header of the basic block.  It ought to contain an
   optional CODE_LABEL followed by NOTE_BASIC_BLOCK. */
2676 x = BB_HEAD (bb);
2677 if (LABEL_P (x))
2678 {
2679 if (BB_END (bb) == x)
2680 {
2681 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2682 bb->index);
2683 err = 1;
2684 }
2685
2686 x = NEXT_INSN (x);
2687 }
2688
2689 if (!NOTE_INSN_BASIC_BLOCK_P (x) || NOTE_BASIC_BLOCK (x) != bb)
2690 {
2691 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2692 bb->index);
2693 err = 1;
2694 }
2695
2696 if (BB_END (bb) == x)
2697 /* Do checks for empty blocks here. */
2698 ;
2699 else
2700 for (x = NEXT_INSN (x); x; x = NEXT_INSN (x))
2701 {
2702 if (NOTE_INSN_BASIC_BLOCK_P (x))
2703 {
2704 error ("NOTE_INSN_BASIC_BLOCK %d in middle of basic block %d",
2705 INSN_UID (x), bb->index);
2706 err = 1;
2707 }
2708
2709 if (x == BB_END (bb))
2710 break;
2711
2712 if (control_flow_insn_p (x))
2713 {
2714 error ("in basic block %d:", bb->index);
2715 fatal_insn ("flow control insn inside a basic block", x);
2716 }
2717 }
2718 }
2719
2720 /* Clean up. */
2721 return err;
2722 }
2723
2724 /* Verify that block pointers for instructions in basic blocks, headers and
2725 footers are set appropriately. */
2726
2727 static int
rtl_verify_bb_pointers (void)
2729 {
2730 int err = 0;
2731 basic_block bb;
2732
2733 /* Check the general integrity of the basic blocks. */
2734 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2735 {
2736 rtx_insn *insn;
2737
2738 if (!(bb->flags & BB_RTL))
2739 {
2740 error ("BB_RTL flag not set for block %d", bb->index);
2741 err = 1;
2742 }
2743
2744 FOR_BB_INSNS (bb, insn)
2745 if (BLOCK_FOR_INSN (insn) != bb)
2746 {
2747 error ("insn %d basic block pointer is %d, should be %d",
2748 INSN_UID (insn),
2749 BLOCK_FOR_INSN (insn) ? BLOCK_FOR_INSN (insn)->index : 0,
2750 bb->index);
2751 err = 1;
2752 }
2753
2754 for (insn = BB_HEADER (bb); insn; insn = NEXT_INSN (insn))
2755 if (!BARRIER_P (insn)
2756 && BLOCK_FOR_INSN (insn) != NULL)
2757 {
2758 error ("insn %d in header of bb %d has non-NULL basic block",
2759 INSN_UID (insn), bb->index);
2760 err = 1;
2761 }
2762 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2763 if (!BARRIER_P (insn)
2764 && BLOCK_FOR_INSN (insn) != NULL)
2765 {
2766 error ("insn %d in footer of bb %d has non-NULL basic block",
2767 INSN_UID (insn), bb->index);
2768 err = 1;
2769 }
2770 }
2771
2772 /* Clean up. */
2773 return err;
2774 }
2775
2776 /* Verify the CFG and RTL consistency common for both underlying RTL and
2777 cfglayout RTL.
2778
Currently it does the following checks:
2780
2781 - overlapping of basic blocks
2782 - insns with wrong BLOCK_FOR_INSN pointers
2783 - headers of basic blocks (the NOTE_INSN_BASIC_BLOCK note)
2784 - tails of basic blocks (ensure that boundary is necessary)
2785 - scans body of the basic block for JUMP_INSN, CODE_LABEL
2786 and NOTE_INSN_BASIC_BLOCK
2787 - verify that no fall_thru edge crosses hot/cold partition boundaries
2788 - verify that there are no pending RTL branch predictions
2789 - verify that hot blocks are not dominated by cold blocks
2790
In the future it can be extended to check a lot of other stuff as well
(reachability of basic blocks, life information, etc.). */
2793
2794 static int
rtl_verify_flow_info_1 (void)
2796 {
2797 int err = 0;
2798
2799 err |= rtl_verify_bb_pointers ();
2800
2801 err |= rtl_verify_bb_insns ();
2802
2803 err |= rtl_verify_edges ();
2804
2805 return err;
2806 }
2807
2808 /* Walk the instruction chain and verify that bb head/end pointers
2809 are correct, and that instructions are in exactly one bb and have
2810 correct block pointers. */
2811
2812 static int
rtl_verify_bb_insn_chain (void)
2814 {
2815 basic_block bb;
2816 int err = 0;
2817 rtx_insn *x;
2818 rtx_insn *last_head = get_last_insn ();
2819 basic_block *bb_info;
2820 const int max_uid = get_max_uid ();
2821
2822 bb_info = XCNEWVEC (basic_block, max_uid);
2823
2824 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2825 {
2826 rtx_insn *head = BB_HEAD (bb);
2827 rtx_insn *end = BB_END (bb);
2828
2829 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2830 {
2831 /* Verify the end of the basic block is in the INSN chain. */
2832 if (x == end)
2833 break;
2834
2835 /* And that the code outside of basic blocks has NULL bb field. */
2836 if (!BARRIER_P (x)
2837 && BLOCK_FOR_INSN (x) != NULL)
2838 {
2839 error ("insn %d outside of basic blocks has non-NULL bb field",
2840 INSN_UID (x));
2841 err = 1;
2842 }
2843 }
2844
2845 if (!x)
2846 {
2847 error ("end insn %d for block %d not found in the insn stream",
2848 INSN_UID (end), bb->index);
2849 err = 1;
2850 }
2851
2852 /* Work backwards from the end to the head of the basic block
2853 to verify the head is in the RTL chain. */
2854 for (; x != NULL_RTX; x = PREV_INSN (x))
2855 {
2856 /* While walking over the insn chain, verify insns appear
2857 in only one basic block. */
2858 if (bb_info[INSN_UID (x)] != NULL)
2859 {
2860 error ("insn %d is in multiple basic blocks (%d and %d)",
2861 INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index);
2862 err = 1;
2863 }
2864
2865 bb_info[INSN_UID (x)] = bb;
2866
2867 if (x == head)
2868 break;
2869 }
2870 if (!x)
2871 {
2872 error ("head insn %d for block %d not found in the insn stream",
2873 INSN_UID (head), bb->index);
2874 err = 1;
2875 }
2876
2877 last_head = PREV_INSN (x);
2878 }
2879
2880 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2881 {
2882 /* Check that the code before the first basic block has NULL
2883 bb field. */
2884 if (!BARRIER_P (x)
2885 && BLOCK_FOR_INSN (x) != NULL)
2886 {
2887 error ("insn %d outside of basic blocks has non-NULL bb field",
2888 INSN_UID (x));
2889 err = 1;
2890 }
2891 }
2892 free (bb_info);
2893
2894 return err;
2895 }
2896
2897 /* Verify that fallthru edges point to adjacent blocks in layout order and
2898 that barriers exist after non-fallthru blocks. */
2899
2900 static int
rtl_verify_fallthru (void)
2902 {
2903 basic_block bb;
2904 int err = 0;
2905
2906 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2907 {
2908 edge e;
2909
2910 e = find_fallthru_edge (bb->succs);
2911 if (!e)
2912 {
2913 rtx_insn *insn;
2914
2915 /* Ensure existence of barrier in BB with no fallthru edges. */
2916 for (insn = NEXT_INSN (BB_END (bb)); ; insn = NEXT_INSN (insn))
2917 {
2918 if (!insn || NOTE_INSN_BASIC_BLOCK_P (insn))
2919 {
2920 error ("missing barrier after block %i", bb->index);
2921 err = 1;
2922 break;
2923 }
2924 if (BARRIER_P (insn))
2925 break;
2926 }
2927 }
2928 else if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2929 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2930 {
2931 rtx_insn *insn;
2932
2933 if (e->src->next_bb != e->dest)
2934 {
2935 error
2936 ("verify_flow_info: Incorrect blocks for fallthru %i->%i",
2937 e->src->index, e->dest->index);
2938 err = 1;
2939 }
2940 else
2941 for (insn = NEXT_INSN (BB_END (e->src)); insn != BB_HEAD (e->dest);
2942 insn = NEXT_INSN (insn))
2943 if (BARRIER_P (insn) || NONDEBUG_INSN_P (insn))
2944 {
2945 error ("verify_flow_info: Incorrect fallthru %i->%i",
2946 e->src->index, e->dest->index);
2947 fatal_insn ("wrong insn in the fallthru edge", insn);
2948 err = 1;
2949 }
2950 }
2951 }
2952
2953 return err;
2954 }
2955
2956 /* Verify that blocks are laid out in consecutive order. While walking the
2957 instructions, verify that all expected instructions are inside the basic
2958 blocks, and that all returns are followed by barriers. */
2959
2960 static int
rtl_verify_bb_layout (void)
2962 {
2963 basic_block bb;
2964 int err = 0;
2965 rtx_insn *x, *y;
2966 int num_bb_notes;
2967 rtx_insn * const rtx_first = get_insns ();
2968 basic_block last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun), curr_bb = NULL;
2969
2970 num_bb_notes = 0;
2971 last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun);
2972
2973 for (x = rtx_first; x; x = NEXT_INSN (x))
2974 {
2975 if (NOTE_INSN_BASIC_BLOCK_P (x))
2976 {
2977 bb = NOTE_BASIC_BLOCK (x);
2978
2979 num_bb_notes++;
2980 if (bb != last_bb_seen->next_bb)
2981 internal_error ("basic blocks not laid down consecutively");
2982
2983 curr_bb = last_bb_seen = bb;
2984 }
2985
2986 if (!curr_bb)
2987 {
2988 switch (GET_CODE (x))
2989 {
2990 case BARRIER:
2991 case NOTE:
2992 break;
2993
2994 case CODE_LABEL:
2995 /* An ADDR_VEC is placed outside any basic block. */
2996 if (NEXT_INSN (x)
2997 && JUMP_TABLE_DATA_P (NEXT_INSN (x)))
2998 x = NEXT_INSN (x);
2999
3000 /* But in any case, non-deletable labels can appear anywhere. */
3001 break;
3002
3003 default:
3004 fatal_insn ("insn outside basic block", x);
3005 }
3006 }
3007
3008 if (JUMP_P (x)
3009 && returnjump_p (x) && ! condjump_p (x)
3010 && ! ((y = next_nonnote_nondebug_insn (x))
3011 && BARRIER_P (y)))
3012 fatal_insn ("return not followed by barrier", x);
3013
3014 if (curr_bb && x == BB_END (curr_bb))
3015 curr_bb = NULL;
3016 }
3017
3018 if (num_bb_notes != n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS)
3019 internal_error
3020 ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
3021 num_bb_notes, n_basic_blocks_for_fn (cfun));
3022
3023 return err;
3024 }
3025
3026 /* Verify the CFG and RTL consistency common for both underlying RTL and
3027 cfglayout RTL, plus consistency checks specific to linearized RTL mode.
3028
Currently it does the following checks:
3030 - all checks of rtl_verify_flow_info_1
3031 - test head/end pointers
3032 - check that blocks are laid out in consecutive order
3033 - check that all insns are in the basic blocks
3034 (except the switch handling code, barriers and notes)
3035 - check that all returns are followed by barriers
- check that all fallthru edges point to the adjacent blocks
3037 - verify that there is a single hot/cold partition boundary after bbro */
3038
3039 static int
rtl_verify_flow_info (void)
3041 {
3042 int err = 0;
3043
3044 err |= rtl_verify_flow_info_1 ();
3045
3046 err |= rtl_verify_bb_insn_chain ();
3047
3048 err |= rtl_verify_fallthru ();
3049
3050 err |= rtl_verify_bb_layout ();
3051
3052 err |= verify_hot_cold_block_grouping ();
3053
3054 return err;
3055 }
3056
3057 /* Assume that the preceding pass has possibly eliminated jump instructions
or converted the unconditional jumps.  Eliminate the corresponding edges from the CFG.
3059 Return true if any edges are eliminated. */
3060
3061 bool
purge_dead_edges (basic_block bb)
3063 {
3064 edge e;
3065 rtx_insn *insn = BB_END (bb);
3066 rtx note;
3067 bool purged = false;
3068 bool found;
3069 edge_iterator ei;
3070
3071 if ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb))
3072 do
3073 insn = PREV_INSN (insn);
3074 while ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb));
3075
3076 /* If this instruction cannot trap, remove REG_EH_REGION notes. */
3077 if (NONJUMP_INSN_P (insn)
3078 && (note = find_reg_note (insn, REG_EH_REGION, NULL)))
3079 {
3080 rtx eqnote;
3081
3082 if (! may_trap_p (PATTERN (insn))
3083 || ((eqnote = find_reg_equal_equiv_note (insn))
3084 && ! may_trap_p (XEXP (eqnote, 0))))
3085 remove_note (insn, note);
3086 }
3087
3088 /* Cleanup abnormal edges caused by exceptions or non-local gotos. */
3089 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3090 {
3091 bool remove = false;
3092
3093 /* There are three types of edges we need to handle correctly here: EH
3094 edges, abnormal call EH edges, and abnormal call non-EH edges. The
3095 latter can appear when nonlocal gotos are used. */
3096 if (e->flags & EDGE_ABNORMAL_CALL)
3097 {
3098 if (!CALL_P (insn))
3099 remove = true;
3100 else if (can_nonlocal_goto (insn))
3101 ;
3102 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3103 ;
3104 else if (flag_tm && find_reg_note (insn, REG_TM, NULL))
3105 ;
3106 else
3107 remove = true;
3108 }
3109 else if (e->flags & EDGE_EH)
3110 remove = !can_throw_internal (insn);
3111
3112 if (remove)
3113 {
3114 remove_edge (e);
3115 df_set_bb_dirty (bb);
3116 purged = true;
3117 }
3118 else
3119 ei_next (&ei);
3120 }
3121
3122 if (JUMP_P (insn))
3123 {
3124 rtx note;
3125 edge b,f;
3126 edge_iterator ei;
3127
/* We only care about conditional jumps and simplejumps. */
3129 if (!any_condjump_p (insn)
3130 && !returnjump_p (insn)
3131 && !simplejump_p (insn))
3132 return purged;
3133
3134 /* Branch probability/prediction notes are defined only for
3135 condjumps. We've possibly turned condjump into simplejump. */
3136 if (simplejump_p (insn))
3137 {
3138 note = find_reg_note (insn, REG_BR_PROB, NULL);
3139 if (note)
3140 remove_note (insn, note);
3141 while ((note = find_reg_note (insn, REG_BR_PRED, NULL)))
3142 remove_note (insn, note);
3143 }
3144
3145 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3146 {
/* Avoid letting abnormal flags leak from computed jumps turned
   into simplejumps. */
3149
3150 e->flags &= ~EDGE_ABNORMAL;
3151
3152 /* See if this edge is one we should keep. */
3153 if ((e->flags & EDGE_FALLTHRU) && any_condjump_p (insn))
3154 /* A conditional jump can fall through into the next
3155 block, so we should keep the edge. */
3156 {
3157 ei_next (&ei);
3158 continue;
3159 }
3160 else if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
3161 && BB_HEAD (e->dest) == JUMP_LABEL (insn))
3162 /* If the destination block is the target of the jump,
3163 keep the edge. */
3164 {
3165 ei_next (&ei);
3166 continue;
3167 }
3168 else if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
3169 && returnjump_p (insn))
3170 /* If the destination block is the exit block, and this
3171 instruction is a return, then keep the edge. */
3172 {
3173 ei_next (&ei);
3174 continue;
3175 }
3176 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3177 /* Keep the edges that correspond to exceptions thrown by
3178 this instruction and rematerialize the EDGE_ABNORMAL
3179 flag we just cleared above. */
3180 {
3181 e->flags |= EDGE_ABNORMAL;
3182 ei_next (&ei);
3183 continue;
3184 }
3185
3186 /* We do not need this edge. */
3187 df_set_bb_dirty (bb);
3188 purged = true;
3189 remove_edge (e);
3190 }
3191
3192 if (EDGE_COUNT (bb->succs) == 0 || !purged)
3193 return purged;
3194
3195 if (dump_file)
3196 fprintf (dump_file, "Purged edges from bb %i\n", bb->index);
3197
3198 if (!optimize)
3199 return purged;
3200
3201 /* Redistribute probabilities. */
3202 if (single_succ_p (bb))
3203 {
3204 single_succ_edge (bb)->probability = profile_probability::always ();
3205 }
3206 else
3207 {
3208 note = find_reg_note (insn, REG_BR_PROB, NULL);
3209 if (!note)
3210 return purged;
3211
3212 b = BRANCH_EDGE (bb);
3213 f = FALLTHRU_EDGE (bb);
3214 b->probability = profile_probability::from_reg_br_prob_note
3215 (XINT (note, 0));
3216 f->probability = b->probability.invert ();
3217 }
3218
3219 return purged;
3220 }
3221 else if (CALL_P (insn) && SIBLING_CALL_P (insn))
3222 {
3223 /* First, there should not be any EH or ABCALL edges resulting
3224 from non-local gotos and the like. If there were, we shouldn't
3225 have created the sibcall in the first place. Second, there
3226 should of course never have been a fallthru edge. */
3227 gcc_assert (single_succ_p (bb));
3228 gcc_assert (single_succ_edge (bb)->flags
3229 == (EDGE_SIBCALL | EDGE_ABNORMAL));
3230
3231 return 0;
3232 }
3233
3234 /* If we don't see a jump insn, we don't know exactly why the block would
3235 have been broken at this point. Look for a simple, non-fallthru edge,
3236 as these are only created by conditional branches. If we find such an
3237 edge we know that there used to be a jump here and can then safely
3238 remove all non-fallthru edges. */
3239 found = false;
3240 FOR_EACH_EDGE (e, ei, bb->succs)
3241 if (! (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)))
3242 {
3243 found = true;
3244 break;
3245 }
3246
3247 if (!found)
3248 return purged;
3249
3250 /* Remove all but the fake and fallthru edges. The fake edge may be
3251 the only successor for this block in the case of noreturn
3252 calls. */
3253 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3254 {
3255 if (!(e->flags & (EDGE_FALLTHRU | EDGE_FAKE)))
3256 {
3257 df_set_bb_dirty (bb);
3258 remove_edge (e);
3259 purged = true;
3260 }
3261 else
3262 ei_next (&ei);
3263 }
3264
3265 gcc_assert (single_succ_p (bb));
3266
3267 single_succ_edge (bb)->probability = profile_probability::always ();
3268
3269 if (dump_file)
3270 fprintf (dump_file, "Purged non-fallthru edges from bb %i\n",
3271 bb->index);
3272 return purged;
3273 }
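
/* Illustrative usage sketch (BB is a hypothetical block): after a pass
   rewrites or deletes the control flow insn at the end of BB, the stale
   successor edges can be removed with

     bool changed = purge_dead_edges (bb);

   A pass that may have simplified jumps throughout the function can
   simply call purge_all_dead_edges below instead.  */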
3274
3275 /* Search all basic blocks for potentially dead edges and purge them. Return
3276 true if some edge has been eliminated. */
3277
3278 bool
purge_all_dead_edges (void)
3280 {
3281 int purged = false;
3282 basic_block bb;
3283
3284 FOR_EACH_BB_FN (bb, cfun)
3285 {
3286 bool purged_here = purge_dead_edges (bb);
3287
3288 purged |= purged_here;
3289 }
3290
3291 return purged;
3292 }
3293
3294 /* This is used by a few passes that emit some instructions after abnormal
3295 calls, moving the basic block's end, while they in fact do want to emit
them on the fallthru edge.  Look for abnormal call edges, search backward
for the call in the block, and insert the instructions on the edge instead.
3298
3299 Similarly, handle instructions throwing exceptions internally.
3300
3301 Return true when instructions have been found and inserted on edges. */
3302
3303 bool
fixup_abnormal_edges (void)
3305 {
3306 bool inserted = false;
3307 basic_block bb;
3308
3309 FOR_EACH_BB_FN (bb, cfun)
3310 {
3311 edge e;
3312 edge_iterator ei;
3313
3314 /* Look for cases we are interested in - calls or instructions causing
3315 exceptions. */
3316 FOR_EACH_EDGE (e, ei, bb->succs)
3317 if ((e->flags & EDGE_ABNORMAL_CALL)
3318 || ((e->flags & (EDGE_ABNORMAL | EDGE_EH))
3319 == (EDGE_ABNORMAL | EDGE_EH)))
3320 break;
3321
3322 if (e && !CALL_P (BB_END (bb)) && !can_throw_internal (BB_END (bb)))
3323 {
3324 rtx_insn *insn;
3325
3326 /* Get past the new insns generated. Allow notes, as the insns
may already have been deleted. */
3328 insn = BB_END (bb);
3329 while ((NONJUMP_INSN_P (insn) || NOTE_P (insn))
3330 && !can_throw_internal (insn)
3331 && insn != BB_HEAD (bb))
3332 insn = PREV_INSN (insn);
3333
3334 if (CALL_P (insn) || can_throw_internal (insn))
3335 {
3336 rtx_insn *stop, *next;
3337
3338 e = find_fallthru_edge (bb->succs);
3339
3340 stop = NEXT_INSN (BB_END (bb));
3341 BB_END (bb) = insn;
3342
3343 for (insn = NEXT_INSN (insn); insn != stop; insn = next)
3344 {
3345 next = NEXT_INSN (insn);
3346 if (INSN_P (insn))
3347 {
3348 delete_insn (insn);
3349
3350 /* Sometimes there's still the return value USE.
3351 If it's placed after a trapping call (i.e. that
3352 call is the last insn anyway), we have no fallthru
3353 edge. Simply delete this use and don't try to insert
3354 on the non-existent edge.
3355 Similarly, sometimes a call that can throw is
3356 followed in the source with __builtin_unreachable (),
3357 meaning that there is UB if the call returns rather
3358 than throws. If there weren't any instructions
3359 following such calls before, supposedly even the ones
3360 we've deleted aren't significant and can be
3361 removed. */
3362 if (e)
3363 {
3364 /* We're not deleting it, we're moving it. */
3365 insn->set_undeleted ();
3366 SET_PREV_INSN (insn) = NULL_RTX;
3367 SET_NEXT_INSN (insn) = NULL_RTX;
3368
3369 insert_insn_on_edge (insn, e);
3370 inserted = true;
3371 }
3372 }
3373 else if (!BARRIER_P (insn))
3374 set_block_for_insn (insn, NULL);
3375 }
3376 }
3377
3378 /* It may be that we don't find any trapping insn. In this
3379 case we discovered quite late that the insn that had been
3380 marked as can_throw_internal in fact couldn't trap at all.
3381 So we should in fact delete the EH edges out of the block. */
3382 else
3383 purge_dead_edges (bb);
3384 }
3385 }
3386
3387 return inserted;
3388 }
3389
3390 /* Cut the insns from FIRST to LAST out of the insns stream. */
3391
3392 rtx_insn *
unlink_insn_chain (rtx_insn *first, rtx_insn *last)
3394 {
3395 rtx_insn *prevfirst = PREV_INSN (first);
3396 rtx_insn *nextlast = NEXT_INSN (last);
3397
3398 SET_PREV_INSN (first) = NULL;
3399 SET_NEXT_INSN (last) = NULL;
3400 if (prevfirst)
3401 SET_NEXT_INSN (prevfirst) = nextlast;
3402 if (nextlast)
3403 SET_PREV_INSN (nextlast) = prevfirst;
3404 else
3405 set_last_insn (prevfirst);
3406 if (!prevfirst)
3407 set_first_insn (nextlast);
3408 return first;
3409 }
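
/* On return FIRST..LAST form a detached chain: PREV_INSN (FIRST) and
   NEXT_INSN (LAST) are NULL, and the main insn stream is re-linked
   around the removed insns.  record_effective_endpoints below uses this
   to pull header and footer insns out of the main chain in cfglayout
   mode.  */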
3410
3411 /* Skip over inter-block insns occurring after BB which are typically
3412 associated with BB (e.g., barriers). If there are any such insns,
3413 we return the last one. Otherwise, we return the end of BB. */
3414
3415 static rtx_insn *
skip_insns_after_block (basic_block bb)
3417 {
3418 rtx_insn *insn, *last_insn, *next_head, *prev;
3419
3420 next_head = NULL;
3421 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
3422 next_head = BB_HEAD (bb->next_bb);
3423
3424 for (last_insn = insn = BB_END (bb); (insn = NEXT_INSN (insn)) != 0; )
3425 {
3426 if (insn == next_head)
3427 break;
3428
3429 switch (GET_CODE (insn))
3430 {
3431 case BARRIER:
3432 last_insn = insn;
3433 continue;
3434
3435 case NOTE:
3436 switch (NOTE_KIND (insn))
3437 {
3438 case NOTE_INSN_BLOCK_END:
3439 gcc_unreachable ();
3440 continue;
3441 default:
3442 continue;
3443 break;
3444 }
3445 break;
3446
3447 case CODE_LABEL:
3448 if (NEXT_INSN (insn)
3449 && JUMP_TABLE_DATA_P (NEXT_INSN (insn)))
3450 {
3451 insn = NEXT_INSN (insn);
3452 last_insn = insn;
3453 continue;
3454 }
3455 break;
3456
3457 default:
3458 break;
3459 }
3460
3461 break;
3462 }
3463
/* It is possible to hit a contradictory sequence.  For instance:
3465
3466 jump_insn
3467 NOTE_INSN_BLOCK_BEG
3468 barrier
3469
where the barrier belongs to jump_insn, but the note does not.  This can be
created by removing the basic block originally following
NOTE_INSN_BLOCK_BEG.  In such a case, reorder the notes. */
3473
3474 for (insn = last_insn; insn != BB_END (bb); insn = prev)
3475 {
3476 prev = PREV_INSN (insn);
3477 if (NOTE_P (insn))
3478 switch (NOTE_KIND (insn))
3479 {
3480 case NOTE_INSN_BLOCK_END:
3481 gcc_unreachable ();
3482 break;
3483 case NOTE_INSN_DELETED:
3484 case NOTE_INSN_DELETED_LABEL:
3485 case NOTE_INSN_DELETED_DEBUG_LABEL:
3486 continue;
3487 default:
3488 reorder_insns (insn, insn, last_insn);
3489 }
3490 }
3491
3492 return last_insn;
3493 }
3494
3495 /* Locate or create a label for a given basic block. */
3496
3497 static rtx_insn *
label_for_bb (basic_block bb)
3499 {
3500 rtx_insn *label = BB_HEAD (bb);
3501
3502 if (!LABEL_P (label))
3503 {
3504 if (dump_file)
3505 fprintf (dump_file, "Emitting label for block %d\n", bb->index);
3506
3507 label = block_label (bb);
3508 }
3509
3510 return label;
3511 }
3512
3513 /* Locate the effective beginning and end of the insn chain for each
3514 block, as defined by skip_insns_after_block above. */
3515
3516 static void
record_effective_endpoints (void)
3518 {
3519 rtx_insn *next_insn;
3520 basic_block bb;
3521 rtx_insn *insn;
3522
3523 for (insn = get_insns ();
3524 insn
3525 && NOTE_P (insn)
3526 && NOTE_KIND (insn) != NOTE_INSN_BASIC_BLOCK;
3527 insn = NEXT_INSN (insn))
3528 continue;
3529 /* No basic blocks at all? */
3530 gcc_assert (insn);
3531
3532 if (PREV_INSN (insn))
3533 cfg_layout_function_header =
3534 unlink_insn_chain (get_insns (), PREV_INSN (insn));
3535 else
3536 cfg_layout_function_header = NULL;
3537
3538 next_insn = get_insns ();
3539 FOR_EACH_BB_FN (bb, cfun)
3540 {
3541 rtx_insn *end;
3542
3543 if (PREV_INSN (BB_HEAD (bb)) && next_insn != BB_HEAD (bb))
3544 BB_HEADER (bb) = unlink_insn_chain (next_insn,
3545 PREV_INSN (BB_HEAD (bb)));
3546 end = skip_insns_after_block (bb);
3547 if (NEXT_INSN (BB_END (bb)) && BB_END (bb) != end)
3548 BB_FOOTER (bb) = unlink_insn_chain (NEXT_INSN (BB_END (bb)), end);
3549 next_insn = NEXT_INSN (BB_END (bb));
3550 }
3551
3552 cfg_layout_function_footer = next_insn;
3553 if (cfg_layout_function_footer)
3554 cfg_layout_function_footer = unlink_insn_chain (cfg_layout_function_footer, get_last_insn ());
3555 }
3556
3557 namespace {
3558
3559 const pass_data pass_data_into_cfg_layout_mode =
3560 {
3561 RTL_PASS, /* type */
3562 "into_cfglayout", /* name */
3563 OPTGROUP_NONE, /* optinfo_flags */
3564 TV_CFG, /* tv_id */
3565 0, /* properties_required */
3566 PROP_cfglayout, /* properties_provided */
3567 0, /* properties_destroyed */
3568 0, /* todo_flags_start */
3569 0, /* todo_flags_finish */
3570 };
3571
3572 class pass_into_cfg_layout_mode : public rtl_opt_pass
3573 {
3574 public:
pass_into_cfg_layout_mode (gcc::context *ctxt)
3576 : rtl_opt_pass (pass_data_into_cfg_layout_mode, ctxt)
3577 {}
3578
3579 /* opt_pass methods: */
virtual unsigned int execute (function *)
3581 {
3582 cfg_layout_initialize (0);
3583 return 0;
3584 }
3585
3586 }; // class pass_into_cfg_layout_mode
3587
3588 } // anon namespace
3589
3590 rtl_opt_pass *
make_pass_into_cfg_layout_mode (gcc::context *ctxt)
3592 {
3593 return new pass_into_cfg_layout_mode (ctxt);
3594 }
3595
3596 namespace {
3597
3598 const pass_data pass_data_outof_cfg_layout_mode =
3599 {
3600 RTL_PASS, /* type */
3601 "outof_cfglayout", /* name */
3602 OPTGROUP_NONE, /* optinfo_flags */
3603 TV_CFG, /* tv_id */
3604 0, /* properties_required */
3605 0, /* properties_provided */
3606 PROP_cfglayout, /* properties_destroyed */
3607 0, /* todo_flags_start */
3608 0, /* todo_flags_finish */
3609 };
3610
3611 class pass_outof_cfg_layout_mode : public rtl_opt_pass
3612 {
3613 public:
pass_outof_cfg_layout_mode (gcc::context *ctxt)
3615 : rtl_opt_pass (pass_data_outof_cfg_layout_mode, ctxt)
3616 {}
3617
3618 /* opt_pass methods: */
3619 virtual unsigned int execute (function *);
3620
3621 }; // class pass_outof_cfg_layout_mode
3622
3623 unsigned int
pass_outof_cfg_layout_mode::execute (function *fun)
3625 {
3626 basic_block bb;
3627
3628 FOR_EACH_BB_FN (bb, fun)
3629 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (fun))
3630 bb->aux = bb->next_bb;
3631
3632 cfg_layout_finalize ();
3633
3634 return 0;
3635 }
3636
3637 } // anon namespace
3638
3639 rtl_opt_pass *
make_pass_outof_cfg_layout_mode (gcc::context *ctxt)
3641 {
3642 return new pass_outof_cfg_layout_mode (ctxt);
3643 }
3644
3645
3646 /* Link the basic blocks in the correct order, compacting the basic
3647 block queue while at it. If STAY_IN_CFGLAYOUT_MODE is false, this
3648 function also clears the basic block header and footer fields.
3649
3650 This function is usually called after a pass (e.g. tracer) finishes
3651 some transformations while in cfglayout mode. The required sequence
3652 of the basic blocks is in a linked list along the bb->aux field.
This function re-links the basic block prev_bb and next_bb pointers
3654 accordingly, and it compacts and renumbers the blocks.
3655
3656 FIXME: This currently works only for RTL, but the only RTL-specific
3657 bits are the STAY_IN_CFGLAYOUT_MODE bits. The tracer pass was moved
3658 to GIMPLE a long time ago, but it doesn't relink the basic block
3659 chain. It could do that (to give better initial RTL) if this function
3660 is made IR-agnostic (and moved to cfganal.c or cfg.c while at it). */
3661
3662 void
relink_block_chain (bool stay_in_cfglayout_mode)
3664 {
3665 basic_block bb, prev_bb;
3666 int index;
3667
3668 /* Maybe dump the re-ordered sequence. */
3669 if (dump_file)
3670 {
3671 fprintf (dump_file, "Reordered sequence:\n");
3672 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, index =
3673 NUM_FIXED_BLOCKS;
3674 bb;
3675 bb = (basic_block) bb->aux, index++)
3676 {
3677 fprintf (dump_file, " %i ", index);
3678 if (get_bb_original (bb))
3679 fprintf (dump_file, "duplicate of %i ",
3680 get_bb_original (bb)->index);
3681 else if (forwarder_block_p (bb)
3682 && !LABEL_P (BB_HEAD (bb)))
3683 fprintf (dump_file, "compensation ");
3684 else
3685 fprintf (dump_file, "bb %i ", bb->index);
3686 }
3687 }
3688
3689 /* Now reorder the blocks. */
3690 prev_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
3691 bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
3692 for (; bb; prev_bb = bb, bb = (basic_block) bb->aux)
3693 {
3694 bb->prev_bb = prev_bb;
3695 prev_bb->next_bb = bb;
3696 }
3697 prev_bb->next_bb = EXIT_BLOCK_PTR_FOR_FN (cfun);
3698 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb = prev_bb;
3699
3700 /* Then, clean up the aux fields. */
3701 FOR_ALL_BB_FN (bb, cfun)
3702 {
3703 bb->aux = NULL;
3704 if (!stay_in_cfglayout_mode)
3705 BB_HEADER (bb) = BB_FOOTER (bb) = NULL;
3706 }
3707
/* Maybe reset the original copy tables; they are not valid anymore
   when we renumber the basic blocks in compact_blocks.  If we are
   going out of cfglayout mode, don't re-allocate the tables. */
3711 if (original_copy_tables_initialized_p ())
3712 free_original_copy_tables ();
3713 if (stay_in_cfglayout_mode)
3714 initialize_original_copy_tables ();
3715
3716 /* Finally, put basic_block_info in the new order. */
3717 compact_blocks ();
3718 }
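
/* Minimal sketch of the expected calling convention (illustrative
   only): a cfglayout-mode pass that has decided on a new block order
   chains the blocks through bb->aux and then has this function commit
   the order, e.g.

     FOR_EACH_BB_FN (bb, cfun)
       bb->aux = <next block in the desired order, or NULL for the last>;
     relink_block_chain (true);

   Passing false additionally clears the header and footer fields, as is
   done when leaving cfglayout mode.  */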
3719
3720
3721 /* Given a reorder chain, rearrange the code to match. */
3722
3723 static void
fixup_reorder_chain (void)
3725 {
3726 basic_block bb;
3727 rtx_insn *insn = NULL;
3728
3729 if (cfg_layout_function_header)
3730 {
3731 set_first_insn (cfg_layout_function_header);
3732 insn = cfg_layout_function_header;
3733 while (NEXT_INSN (insn))
3734 insn = NEXT_INSN (insn);
3735 }
3736
3737 /* First do the bulk reordering -- rechain the blocks without regard to
3738 the needed changes to jumps and labels. */
3739
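/* Walk the blocks in aux-chain order, splicing each block's optional
   header, its body, and its optional footer onto the growing insn list;
   INSN always points at the last insn linked so far.  */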
3740 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb; bb = (basic_block)
3741 bb->aux)
3742 {
3743 if (BB_HEADER (bb))
3744 {
3745 if (insn)
3746 SET_NEXT_INSN (insn) = BB_HEADER (bb);
3747 else
3748 set_first_insn (BB_HEADER (bb));
3749 SET_PREV_INSN (BB_HEADER (bb)) = insn;
3750 insn = BB_HEADER (bb);
3751 while (NEXT_INSN (insn))
3752 insn = NEXT_INSN (insn);
3753 }
3754 if (insn)
3755 SET_NEXT_INSN (insn) = BB_HEAD (bb);
3756 else
3757 set_first_insn (BB_HEAD (bb));
3758 SET_PREV_INSN (BB_HEAD (bb)) = insn;
3759 insn = BB_END (bb);
3760 if (BB_FOOTER (bb))
3761 {
3762 SET_NEXT_INSN (insn) = BB_FOOTER (bb);
3763 SET_PREV_INSN (BB_FOOTER (bb)) = insn;
3764 while (NEXT_INSN (insn))
3765 insn = NEXT_INSN (insn);
3766 }
3767 }
3768
3769 SET_NEXT_INSN (insn) = cfg_layout_function_footer;
3770 if (cfg_layout_function_footer)
3771 SET_PREV_INSN (cfg_layout_function_footer) = insn;
3772
3773 while (NEXT_INSN (insn))
3774 insn = NEXT_INSN (insn);
3775
3776 set_last_insn (insn);
3777 if (flag_checking)
3778 verify_insn_chain ();
3779
3780 /* Now add jumps and labels as needed to match the blocks' new
3781 outgoing edges. */
3782
3783 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb ; bb = (basic_block)
3784 bb->aux)
3785 {
3786 edge e_fall, e_taken, e;
3787 rtx_insn *bb_end_insn;
3788 rtx ret_label = NULL_RTX;
3789 basic_block nb;
3790 edge_iterator ei;
3791
3792 if (EDGE_COUNT (bb->succs) == 0)
3793 continue;
3794
3795 /* Find the old fallthru edge, and another non-EH edge for
3796 a taken jump. */
3797 e_taken = e_fall = NULL;
3798
3799 FOR_EACH_EDGE (e, ei, bb->succs)
3800 if (e->flags & EDGE_FALLTHRU)
3801 e_fall = e;
3802 else if (! (e->flags & EDGE_EH))
3803 e_taken = e;
3804
3805 bb_end_insn = BB_END (bb);
3806 if (rtx_jump_insn *bb_end_jump = dyn_cast <rtx_jump_insn *> (bb_end_insn))
3807 {
3808 ret_label = JUMP_LABEL (bb_end_jump);
3809 if (any_condjump_p (bb_end_jump))
3810 {
3811 /* This might happen if the conditional jump has side
3812 effects and could therefore not be optimized away.
3813 Make the basic block end with a barrier in order
3814 to prevent rtl_verify_flow_info from complaining. */
3815 if (!e_fall)
3816 {
3817 gcc_assert (!onlyjump_p (bb_end_jump)
3818 || returnjump_p (bb_end_jump)
3819 || (e_taken->flags & EDGE_CROSSING));
3820 emit_barrier_after (bb_end_jump);
3821 continue;
3822 }
3823
3824 /* If the old fallthru is still next, nothing to do. */
3825 if (bb->aux == e_fall->dest
3826 || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3827 continue;
3828
3829 /* The degenerate case of a conditional jump jumping to the next
3830 instruction can happen for jumps with side effects. We need
3831 to construct a forwarder block, and this will be done just
3832 fine by force_nonfallthru below. */
3833 if (!e_taken)
3834 ;
3835
3836 /* There is another special case: if *neither* block is next,
3837 such as happens at the very end of a function, then we'll
3838 need to add a new unconditional jump. Choose the taken
3839 edge based on known or assumed probability. */
3840 else if (bb->aux != e_taken->dest)
3841 {
3842 rtx note = find_reg_note (bb_end_jump, REG_BR_PROB, 0);
3843
3844 if (note
3845 && profile_probability::from_reg_br_prob_note
3846 (XINT (note, 0)) < profile_probability::even ()
3847 && invert_jump (bb_end_jump,
3848 (e_fall->dest
3849 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3850 ? NULL_RTX
3851 : label_for_bb (e_fall->dest)), 0))
3852 {
3853 e_fall->flags &= ~EDGE_FALLTHRU;
3854 gcc_checking_assert (could_fall_through
3855 (e_taken->src, e_taken->dest));
3856 e_taken->flags |= EDGE_FALLTHRU;
3857 update_br_prob_note (bb);
3858 e = e_fall, e_fall = e_taken, e_taken = e;
3859 }
3860 }
3861
3862 /* If the "jumping" edge is a crossing edge, and the fall
3863 through edge is non-crossing, leave things as they are. */
3864 else if ((e_taken->flags & EDGE_CROSSING)
3865 && !(e_fall->flags & EDGE_CROSSING))
3866 continue;
3867
3868 /* Otherwise we can try to invert the jump. This will
3869 basically never fail, however, keep up the pretense. */
3870 else if (invert_jump (bb_end_jump,
3871 (e_fall->dest
3872 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3873 ? NULL_RTX
3874 : label_for_bb (e_fall->dest)), 0))
3875 {
3876 e_fall->flags &= ~EDGE_FALLTHRU;
3877 gcc_checking_assert (could_fall_through
3878 (e_taken->src, e_taken->dest));
3879 e_taken->flags |= EDGE_FALLTHRU;
3880 update_br_prob_note (bb);
3881 if (LABEL_NUSES (ret_label) == 0
3882 && single_pred_p (e_taken->dest))
3883 delete_insn (as_a<rtx_insn *> (ret_label));
3884 continue;
3885 }
3886 }
3887 else if (extract_asm_operands (PATTERN (bb_end_insn)) != NULL)
3888 {
3889 /* If the old fallthru is still next or if
3890 asm goto doesn't have a fallthru (e.g. when followed by
3891 __builtin_unreachable ()), nothing to do. */
3892 if (! e_fall
3893 || bb->aux == e_fall->dest
3894 || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3895 continue;
3896
3897 /* Otherwise we'll have to use the fallthru fixup below. */
3898 }
3899 else
3900 {
3901 /* Otherwise we have some return, switch or computed
3902 jump. In the 99% case, there should not have been a
3903 fallthru edge. */
3904 gcc_assert (returnjump_p (bb_end_insn) || !e_fall);
3905 continue;
3906 }
3907 }
3908 else
3909 {
3910 /* No fallthru implies a noreturn function with EH edges, or
3911 something similarly bizarre. In any case, we don't need to
3912 do anything. */
3913 if (! e_fall)
3914 continue;
3915
3916 /* If the fallthru block is still next, nothing to do. */
3917 if (bb->aux == e_fall->dest)
3918 continue;
3919
3920 /* A fallthru to exit block. */
3921 if (e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3922 continue;
3923 }
3924
3925 /* We got here if we need to add a new jump insn.
3926 Note force_nonfallthru can delete E_FALL and thus we have to
3927 save E_FALL->src prior to the call to force_nonfallthru. */
3928 nb = force_nonfallthru_and_redirect (e_fall, e_fall->dest, ret_label);
3929 if (nb)
3930 {
3931 nb->aux = bb->aux;
3932 bb->aux = nb;
3933 /* Don't process this new block. */
3934 bb = nb;
3935 }
3936 }
3937
3938 relink_block_chain (/*stay_in_cfglayout_mode=*/false);
3939
3940 /* Annoying special case - jump around dead jumptables left in the code. */
3941 FOR_EACH_BB_FN (bb, cfun)
3942 {
3943 edge e = find_fallthru_edge (bb->succs);
3944
3945 if (e && !can_fallthru (e->src, e->dest))
3946 force_nonfallthru (e);
3947 }
3948
3949 /* When not optimizing, ensure that each goto_locus recorded on an edge
3950 corresponds to some instruction with that locus in the RTL. */
3951 if (!optimize && !DECL_IGNORED_P (current_function_decl))
3952 FOR_EACH_BB_FN (bb, cfun)
3953 {
3954 edge e;
3955 edge_iterator ei;
3956
3957 FOR_EACH_EDGE (e, ei, bb->succs)
3958 if (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
3959 && !(e->flags & EDGE_ABNORMAL))
3960 {
3961 edge e2;
3962 edge_iterator ei2;
3963 basic_block dest, nb;
3964 rtx_insn *end;
3965
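/* If the last insn with a location in E->src already carries the
   edge's goto_locus, there is nothing to do for this edge.  */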
3966 insn = BB_END (e->src);
3967 end = PREV_INSN (BB_HEAD (e->src));
3968 while (insn != end
3969 && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
3970 insn = PREV_INSN (insn);
3971 if (insn != end
3972 && INSN_LOCATION (insn) == e->goto_locus)
3973 continue;
3974 if (simplejump_p (BB_END (e->src))
3975 && !INSN_HAS_LOCATION (BB_END (e->src)))
3976 {
3977 INSN_LOCATION (BB_END (e->src)) = e->goto_locus;
3978 continue;
3979 }
3980 dest = e->dest;
3981 if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3982 {
3983 /* Non-fallthru edges to the exit block cannot be split. */
3984 if (!(e->flags & EDGE_FALLTHRU))
3985 continue;
3986 }
3987 else
3988 {
3989 insn = BB_HEAD (dest);
3990 end = NEXT_INSN (BB_END (dest));
3991 while (insn != end && !NONDEBUG_INSN_P (insn))
3992 insn = NEXT_INSN (insn);
3993 if (insn != end && INSN_HAS_LOCATION (insn)
3994 && INSN_LOCATION (insn) == e->goto_locus)
3995 continue;
3996 }
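/* Neither end of the edge already carries the location; split the edge
   and put the goto_locus on the new block's final insn (emitting a nop
   there if the block would otherwise end in a non-insn).  */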
3997 nb = split_edge (e);
3998 if (!INSN_P (BB_END (nb)))
3999 BB_END (nb) = emit_insn_after_noloc (gen_nop (), BB_END (nb),
4000 nb);
4001 INSN_LOCATION (BB_END (nb)) = e->goto_locus;
4002
4003 /* If there are other incoming edges to the destination block
4004 with the same goto locus, redirect them to the new block as
4005 well, this can prevent other such blocks from being created
4006 in subsequent iterations of the loop. */
4007 for (ei2 = ei_start (dest->preds); (e2 = ei_safe_edge (ei2)); )
4008 if (LOCATION_LOCUS (e2->goto_locus) != UNKNOWN_LOCATION
4009 && !(e2->flags & (EDGE_ABNORMAL | EDGE_FALLTHRU))
4010 && e->goto_locus == e2->goto_locus)
4011 redirect_edge_and_branch (e2, nb);
4012 else
4013 ei_next (&ei2);
4014 }
4015 }
4016 }
4017
4018 /* Perform sanity checks on the insn chain.
4019 1. Check that next/prev pointers are consistent in both the forward and
4020 reverse direction.
4021 2. Count insns in chain, going both directions, and check if equal.
4022 3. Check that get_last_insn () returns the actual end of chain. */
4023
4024 DEBUG_FUNCTION void
4025 verify_insn_chain (void)
4026 {
4027 rtx_insn *x, *prevx, *nextx;
4028 int insn_cnt1, insn_cnt2;
4029
4030 for (prevx = NULL, insn_cnt1 = 1, x = get_insns ();
4031 x != 0;
4032 prevx = x, insn_cnt1++, x = NEXT_INSN (x))
4033 gcc_assert (PREV_INSN (x) == prevx);
4034
4035 gcc_assert (prevx == get_last_insn ());
4036
4037 for (nextx = NULL, insn_cnt2 = 1, x = get_last_insn ();
4038 x != 0;
4039 nextx = x, insn_cnt2++, x = PREV_INSN (x))
4040 gcc_assert (NEXT_INSN (x) == nextx);
4041
4042 gcc_assert (insn_cnt1 == insn_cnt2);
4043 }
4044
4045 /* If we have assembler epilogues, the block falling through to exit must
4046 be the last one in the reordered chain when we reach final. Ensure
4047 that this condition is met. */
4048 static void
4049 fixup_fallthru_exit_predecessor (void)
4050 {
4051 edge e;
4052 basic_block bb = NULL;
4053
4054 /* This transformation is not valid before reload, because we might
4055 separate a call from the instruction that copies the return
4056 value. */
4057 gcc_assert (reload_completed);
4058
4059 e = find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4060 if (e)
4061 bb = e->src;
4062
4063 if (bb && bb->aux)
4064 {
4065 basic_block c = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
4066
4067 /* If the very first block is the one with the fall-through exit
4068 edge, we have to split that block. */
4069 if (c == bb)
4070 {
4071 bb = split_block_after_labels (bb)->dest;
4072 bb->aux = c->aux;
4073 c->aux = bb;
4074 BB_FOOTER (bb) = BB_FOOTER (c);
4075 BB_FOOTER (c) = NULL;
4076 }
4077
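/* Unlink BB from its current position in the aux chain and re-attach
   it at the end, so that it becomes the last block before the exit
   block once the chain is relinked.  */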
4078 while (c->aux != bb)
4079 c = (basic_block) c->aux;
4080
4081 c->aux = bb->aux;
4082 while (c->aux)
4083 c = (basic_block) c->aux;
4084
4085 c->aux = bb;
4086 bb->aux = NULL;
4087 }
4088 }
4089
4090 /* In case there is more than one fallthru predecessor of the exit block,
4091 force there to be only one. */
4092
4093 static void
4094 force_one_exit_fallthru (void)
4095 {
4096 edge e, predecessor = NULL;
4097 bool more = false;
4098 edge_iterator ei;
4099 basic_block forwarder, bb;
4100
4101 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
4102 if (e->flags & EDGE_FALLTHRU)
4103 {
4104 if (predecessor == NULL)
4105 predecessor = e;
4106 else
4107 {
4108 more = true;
4109 break;
4110 }
4111 }
4112
4113 if (!more)
4114 return;
4115
4116 /* Exit has several fallthru predecessors. Create a forwarder block for
4117 them. */
4118 forwarder = split_edge (predecessor);
4119 for (ei = ei_start (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4120 (e = ei_safe_edge (ei)); )
4121 {
4122 if (e->src == forwarder
4123 || !(e->flags & EDGE_FALLTHRU))
4124 ei_next (&ei);
4125 else
4126 redirect_edge_and_branch_force (e, forwarder);
4127 }
4128
4129 /* Fix up the chain of blocks -- make FORWARDER immediately precede the
4130 exit block. */
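/* The new FORWARDER block is not yet on the bb->aux chain; attach it
   to the first block found without an aux successor.  */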
4131 FOR_EACH_BB_FN (bb, cfun)
4132 {
4133 if (bb->aux == NULL && bb != forwarder)
4134 {
4135 bb->aux = forwarder;
4136 break;
4137 }
4138 }
4139 }
4140
4141 /* Return true in case it is possible to duplicate the basic block BB. */
4142
4143 static bool
4144 cfg_layout_can_duplicate_bb_p (const_basic_block bb)
4145 {
4146 /* Do not attempt to duplicate tablejumps, as we need to unshare
4147 the dispatch table. This is difficult to do, as the instructions
4148 computing the jump destination may be hoisted outside the basic block. */
4149 if (tablejump_p (BB_END (bb), NULL, NULL))
4150 return false;
4151
4152 /* Do not duplicate blocks containing insns that can't be copied. */
4153 if (targetm.cannot_copy_insn_p)
4154 {
4155 rtx_insn *insn = BB_HEAD (bb);
4156 while (1)
4157 {
4158 if (INSN_P (insn) && targetm.cannot_copy_insn_p (insn))
4159 return false;
4160 if (insn == BB_END (bb))
4161 break;
4162 insn = NEXT_INSN (insn);
4163 }
4164 }
4165
4166 return true;
4167 }
4168
4169 rtx_insn *
4170 duplicate_insn_chain (rtx_insn *from, rtx_insn *to)
4171 {
4172 rtx_insn *insn, *next, *copy;
4173 rtx_note *last;
4174
4175 /* Avoid updating the boundaries of the previous basic block. The
4176 note will get removed from the insn stream in fixup. */
4177 last = emit_note (NOTE_INSN_DELETED);
4178
4179 /* Create copy at the end of INSN chain. The chain will
4180 be reordered later. */
4181 for (insn = from; insn != NEXT_INSN (to); insn = NEXT_INSN (insn))
4182 {
4183 switch (GET_CODE (insn))
4184 {
4185 case DEBUG_INSN:
4186 /* Don't duplicate label debug insns. */
4187 if (DEBUG_BIND_INSN_P (insn)
4188 && TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL)
4189 break;
4190 /* FALLTHRU */
4191 case INSN:
4192 case CALL_INSN:
4193 case JUMP_INSN:
4194 copy = emit_copy_of_insn_after (insn, get_last_insn ());
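/* For return jumps, JUMP_LABEL is the shared (simple_)return rtx rather
   than a CODE_LABEL, so it can simply be propagated to the copy.  */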
4195 if (JUMP_P (insn) && JUMP_LABEL (insn) != NULL_RTX
4196 && ANY_RETURN_P (JUMP_LABEL (insn)))
4197 JUMP_LABEL (copy) = JUMP_LABEL (insn);
4198 maybe_copy_prologue_epilogue_insn (insn, copy);
4199 break;
4200
4201 case JUMP_TABLE_DATA:
4202 /* Avoid copying dispatch tables. We never duplicate
4203 tablejumps, so this can only be hit if the table got
4204 moved far from the original jump.
4205 Avoid copying the following barrier as well, if any
4206 (and any debug insns in between). */
4207 for (next = NEXT_INSN (insn);
4208 next != NEXT_INSN (to);
4209 next = NEXT_INSN (next))
4210 if (!DEBUG_INSN_P (next))
4211 break;
4212 if (next != NEXT_INSN (to) && BARRIER_P (next))
4213 insn = next;
4214 break;
4215
4216 case CODE_LABEL:
4217 break;
4218
4219 case BARRIER:
4220 emit_barrier ();
4221 break;
4222
4223 case NOTE:
4224 switch (NOTE_KIND (insn))
4225 {
4226 /* In case the prologue is empty and the function contains a label
4227 in its first BB, we may want to copy the block. */
4228 case NOTE_INSN_PROLOGUE_END:
4229
4230 case NOTE_INSN_DELETED:
4231 case NOTE_INSN_DELETED_LABEL:
4232 case NOTE_INSN_DELETED_DEBUG_LABEL:
4233 /* No problem to strip these. */
4234 case NOTE_INSN_FUNCTION_BEG:
4235 /* There is always just a single entry to the function. */
4236 case NOTE_INSN_BASIC_BLOCK:
4237 /* We should only switch text sections once. */
4238 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
4239 break;
4240
4241 case NOTE_INSN_EPILOGUE_BEG:
4242 case NOTE_INSN_UPDATE_SJLJ_CONTEXT:
4243 emit_note_copy (as_a <rtx_note *> (insn));
4244 break;
4245
4246 default:
4247 /* All other notes should have already been eliminated. */
4248 gcc_unreachable ();
4249 }
4250 break;
4251 default:
4252 gcc_unreachable ();
4253 }
4254 }
4255 insn = NEXT_INSN (last);
4256 delete_insn (last);
4257 return insn;
4258 }
4259
4260 /* Create a duplicate of the basic block BB. */
4261
4262 static basic_block
4263 cfg_layout_duplicate_bb (basic_block bb, copy_bb_data *)
4264 {
4265 rtx_insn *insn;
4266 basic_block new_bb;
4267
4268 insn = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb));
4269 new_bb = create_basic_block (insn,
4270 insn ? get_last_insn () : NULL,
4271 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
4272
4273 BB_COPY_PARTITION (new_bb, bb);
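/* Also duplicate the header and footer, if any.  duplicate_insn_chain
   emits the copies at the end of the insn stream, so unlink them again
   into the new block's header and footer fields.  */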
4274 if (BB_HEADER (bb))
4275 {
4276 insn = BB_HEADER (bb);
4277 while (NEXT_INSN (insn))
4278 insn = NEXT_INSN (insn);
4279 insn = duplicate_insn_chain (BB_HEADER (bb), insn);
4280 if (insn)
4281 BB_HEADER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4282 }
4283
4284 if (BB_FOOTER (bb))
4285 {
4286 insn = BB_FOOTER (bb);
4287 while (NEXT_INSN (insn))
4288 insn = NEXT_INSN (insn);
4289 insn = duplicate_insn_chain (BB_FOOTER (bb), insn);
4290 if (insn)
4291 BB_FOOTER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4292 }
4293
4294 return new_bb;
4295 }
4296
4297
4298 /* Main entry point to this module - initialize the data structures for
4299 CFG layout changes. It keeps LOOPS up-to-date if not null.
4300
4301 FLAGS is a set of additional flags to pass to cleanup_cfg(). */
4302
4303 void
4304 cfg_layout_initialize (int flags)
4305 {
4306 rtx_insn_list *x;
4307 basic_block bb;
4308
4309 /* Once bb partitioning is complete, cfg layout mode should not be
4310 re-entered. Entering cfg layout mode may require fixups. For
4311 example, edge forwarding performed while optimizing the cfg
4312 layout may require moving a block from the hot to the cold
4313 section, which would create an illegal partitioning unless some
4314 manual fixup was performed. */
4315 gcc_assert (!crtl->bb_reorder_complete || !crtl->has_bb_partition);
4316
4317 initialize_original_copy_tables ();
4318
4319 cfg_layout_rtl_register_cfg_hooks ();
4320
4321 record_effective_endpoints ();
4322
4323 /* Make sure that the targets of non local gotos are marked. */
4324 for (x = nonlocal_goto_handler_labels; x; x = x->next ())
4325 {
4326 bb = BLOCK_FOR_INSN (x->insn ());
4327 bb->flags |= BB_NON_LOCAL_GOTO_TARGET;
4328 }
4329
4330 cleanup_cfg (CLEANUP_CFGLAYOUT | flags);
4331 }
4332
4333 /* Splits superblocks. */
4334 void
4335 break_superblocks (void)
4336 {
4337 bool need = false;
4338 basic_block bb;
4339
4340 auto_sbitmap superblocks (last_basic_block_for_fn (cfun));
4341 bitmap_clear (superblocks);
4342
4343 FOR_EACH_BB_FN (bb, cfun)
4344 if (bb->flags & BB_SUPERBLOCK)
4345 {
4346 bb->flags &= ~BB_SUPERBLOCK;
4347 bitmap_set_bit (superblocks, bb->index);
4348 need = true;
4349 }
4350
4351 if (need)
4352 {
4353 rebuild_jump_labels (get_insns ());
4354 find_many_sub_basic_blocks (superblocks);
4355 }
4356 }
4357
4358 /* Finalize the changes: reorder insn list according to the sequence specified
4359 by aux pointers, enter compensation code, rebuild scope forest. */
4360
4361 void
4362 cfg_layout_finalize (void)
4363 {
4364 free_dominance_info (CDI_DOMINATORS);
4365 force_one_exit_fallthru ();
4366 rtl_register_cfg_hooks ();
4367 if (reload_completed && !targetm.have_epilogue ())
4368 fixup_fallthru_exit_predecessor ();
4369 fixup_reorder_chain ();
4370
4371 rebuild_jump_labels (get_insns ());
4372 delete_dead_jumptables ();
4373
4374 if (flag_checking)
4375 verify_insn_chain ();
4376 checking_verify_flow_info ();
4377 }
4378
4379
4380 /* Same as split_block but update cfg_layout structures. */
4381
4382 static basic_block
4383 cfg_layout_split_block (basic_block bb, void *insnp)
4384 {
4385 rtx insn = (rtx) insnp;
4386 basic_block new_bb = rtl_split_block (bb, insn);
4387
4388 BB_FOOTER (new_bb) = BB_FOOTER (bb);
4389 BB_FOOTER (bb) = NULL;
4390
4391 return new_bb;
4392 }
4393
4394 /* Redirect edge E to DEST. */
4395 static edge
4396 cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
4397 {
4398 basic_block src = e->src;
4399 edge ret;
4400
4401 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
4402 return NULL;
4403
4404 if (e->dest == dest)
4405 return e;
4406
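/* If E is a crossing edge but the new destination DEST is in the same
   partition as E->src, a simple jump at the end of the source block is
   no longer needed; delete it and let the edge become a fallthru.  */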
4407 if (e->flags & EDGE_CROSSING
4408 && BB_PARTITION (e->src) == BB_PARTITION (dest)
4409 && simplejump_p (BB_END (src)))
4410 {
4411 if (dump_file)
4412 fprintf (dump_file,
4413 "Removing crossing jump while redirecting edge form %i to %i\n",
4414 e->src->index, dest->index);
4415 delete_insn (BB_END (src));
4416 remove_barriers_from_footer (src);
4417 e->flags |= EDGE_FALLTHRU;
4418 }
4419
4420 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4421 && (ret = try_redirect_by_replacing_jump (e, dest, true)))
4422 {
4423 df_set_bb_dirty (src);
4424 return ret;
4425 }
4426
4427 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4428 && (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
4429 {
4430 if (dump_file)
4431 fprintf (dump_file, "Redirecting entry edge from bb %i to %i\n",
4432 e->src->index, dest->index);
4433
4434 df_set_bb_dirty (e->src);
4435 redirect_edge_succ (e, dest);
4436 return e;
4437 }
4438
4439 /* Redirect_edge_and_branch may decide to turn the branch into a fallthru
4440 edge in the case the basic blocks appear to be in sequence. Avoid this
4441 transformation. */
4442
4443 if (e->flags & EDGE_FALLTHRU)
4444 {
4445 /* Redirect any branch edges unified with the fallthru one. */
4446 if (JUMP_P (BB_END (src))
4447 && label_is_jump_target_p (BB_HEAD (e->dest),
4448 BB_END (src)))
4449 {
4450 edge redirected;
4451
4452 if (dump_file)
4453 fprintf (dump_file, "Fallthru edge unified with branch "
4454 "%i->%i redirected to %i\n",
4455 e->src->index, e->dest->index, dest->index);
4456 e->flags &= ~EDGE_FALLTHRU;
4457 redirected = redirect_branch_edge (e, dest);
4458 gcc_assert (redirected);
4459 redirected->flags |= EDGE_FALLTHRU;
4460 df_set_bb_dirty (redirected->src);
4461 return redirected;
4462 }
4463 /* In case we are redirecting fallthru edge to the branch edge
4464 of conditional jump, remove it. */
4465 if (EDGE_COUNT (src->succs) == 2)
4466 {
4467 /* Find the edge that is different from E. */
4468 edge s = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e);
4469
4470 if (s->dest == dest
4471 && any_condjump_p (BB_END (src))
4472 && onlyjump_p (BB_END (src)))
4473 delete_insn (BB_END (src));
4474 }
4475 if (dump_file)
4476 fprintf (dump_file, "Redirecting fallthru edge %i->%i to %i\n",
4477 e->src->index, e->dest->index, dest->index);
4478 ret = redirect_edge_succ_nodup (e, dest);
4479 }
4480 else
4481 ret = redirect_branch_edge (e, dest);
4482
4483 if (!ret)
4484 return NULL;
4485
4486 fixup_partition_crossing (ret);
4487 /* We don't want simplejumps in the insn stream during cfglayout. */
4488 gcc_assert (!simplejump_p (BB_END (src)) || CROSSING_JUMP_P (BB_END (src)));
4489
4490 df_set_bb_dirty (src);
4491 return ret;
4492 }
4493
4494 /* Simple wrapper as we always can redirect fallthru edges. */
4495 static basic_block
4496 cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest)
4497 {
4498 edge redirected = cfg_layout_redirect_edge_and_branch (e, dest);
4499
4500 gcc_assert (redirected);
4501 return NULL;
4502 }
4503
4504 /* Same as delete_basic_block but update cfg_layout structures. */
4505
4506 static void
4507 cfg_layout_delete_block (basic_block bb)
4508 {
4509 rtx_insn *insn, *next, *prev = PREV_INSN (BB_HEAD (bb)), *remaints;
4510 rtx_insn **to;
4511
4512 if (BB_HEADER (bb))
4513 {
4514 next = BB_HEAD (bb);
4515 if (prev)
4516 SET_NEXT_INSN (prev) = BB_HEADER (bb);
4517 else
4518 set_first_insn (BB_HEADER (bb));
4519 SET_PREV_INSN (BB_HEADER (bb)) = prev;
4520 insn = BB_HEADER (bb);
4521 while (NEXT_INSN (insn))
4522 insn = NEXT_INSN (insn);
4523 SET_NEXT_INSN (insn) = next;
4524 SET_PREV_INSN (next) = insn;
4525 }
4526 next = NEXT_INSN (BB_END (bb));
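/* Remove barriers from the footer (up to the first label, if any) and
   splice what remains back into the insn stream after the block's last
   insn.  */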
4527 if (BB_FOOTER (bb))
4528 {
4529 insn = BB_FOOTER (bb);
4530 while (insn)
4531 {
4532 if (BARRIER_P (insn))
4533 {
4534 if (PREV_INSN (insn))
4535 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
4536 else
4537 BB_FOOTER (bb) = NEXT_INSN (insn);
4538 if (NEXT_INSN (insn))
4539 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
4540 }
4541 if (LABEL_P (insn))
4542 break;
4543 insn = NEXT_INSN (insn);
4544 }
4545 if (BB_FOOTER (bb))
4546 {
4547 insn = BB_END (bb);
4548 SET_NEXT_INSN (insn) = BB_FOOTER (bb);
4549 SET_PREV_INSN (BB_FOOTER (bb)) = insn;
4550 while (NEXT_INSN (insn))
4551 insn = NEXT_INSN (insn);
4552 SET_NEXT_INSN (insn) = next;
4553 if (next)
4554 SET_PREV_INSN (next) = insn;
4555 else
4556 set_last_insn (insn);
4557 }
4558 }
4559 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4560 to = &BB_HEADER (bb->next_bb);
4561 else
4562 to = &cfg_layout_function_footer;
4563
4564 rtl_delete_block (bb);
4565
4566 if (prev)
4567 prev = NEXT_INSN (prev);
4568 else
4569 prev = get_insns ();
4570 if (next)
4571 next = PREV_INSN (next);
4572 else
4573 next = get_last_insn ();
4574
4575 if (next && NEXT_INSN (next) != prev)
4576 {
4577 remaints = unlink_insn_chain (prev, next);
4578 insn = remaints;
4579 while (NEXT_INSN (insn))
4580 insn = NEXT_INSN (insn);
4581 SET_NEXT_INSN (insn) = *to;
4582 if (*to)
4583 SET_PREV_INSN (*to) = insn;
4584 *to = remaints;
4585 }
4586 }
4587
4588 /* Return true when blocks A and B can be safely merged. */
4589
4590 static bool
4591 cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
4592 {
4593 /* If we are partitioning hot/cold basic blocks, we don't want to
4594 mess up unconditional or indirect jumps that cross between hot
4595 and cold sections.
4596
4597 Basic block partitioning may result in some jumps that appear to
4598 be optimizable (or blocks that appear to be mergeable), but which really
4599 must be left untouched (they are required to make it safely across
4600 partition boundaries). See the comments at the top of
4601 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
4602
4603 if (BB_PARTITION (a) != BB_PARTITION (b))
4604 return false;
4605
4606 /* Protect the loop latches. */
4607 if (current_loops && b->loop_father->latch == b)
4608 return false;
4609
4610 /* If we would end up moving B's instructions, make sure it doesn't fall
4611 through into the exit block, since we cannot recover from a fallthrough
4612 edge into the exit block occurring in the middle of a function. */
4613 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4614 {
4615 edge e = find_fallthru_edge (b->succs);
4616 if (e && e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4617 return false;
4618 }
4619
4620 /* There must be exactly one edge in between the blocks. */
4621 return (single_succ_p (a)
4622 && single_succ (a) == b
4623 && single_pred_p (b) == 1
4624 && a != b
4625 /* Must be simple edge. */
4626 && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
4627 && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4628 && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
4629 /* If the jump insn has side effects, we can't kill the edge.
4630 When not optimizing, try_redirect_by_replacing_jump will
4631 not allow us to redirect an edge by replacing a table jump. */
4632 && (!JUMP_P (BB_END (a))
4633 || ((!optimize || reload_completed)
4634 ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
4635 }
4636
4637 /* Merge block A and B. The blocks must be mergeable. */
4638
4639 static void
4640 cfg_layout_merge_blocks (basic_block a, basic_block b)
4641 {
4642 /* If B is a forwarder block whose outgoing edge has no location, we'll
4643 propagate the locus of the edge between A and B onto it. */
4644 const bool forward_edge_locus
4645 = (b->flags & BB_FORWARDER_BLOCK) != 0
4646 && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION;
4647 rtx_insn *insn;
4648
4649 gcc_checking_assert (cfg_layout_can_merge_blocks_p (a, b));
4650
4651 if (dump_file)
4652 fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
4653 a->index);
4654
4655 /* If there was a CODE_LABEL beginning B, delete it. */
4656 if (LABEL_P (BB_HEAD (b)))
4657 {
4658 delete_insn (BB_HEAD (b));
4659 }
4660
4661 /* We should have a fallthru edge in A, or we can do a dummy redirection to
4662 get it cleaned up. */
4663 if (JUMP_P (BB_END (a)))
4664 try_redirect_by_replacing_jump (EDGE_SUCC (a, 0), b, true);
4665 gcc_assert (!JUMP_P (BB_END (a)));
4666
4667 /* If not optimizing, preserve the locus of the single edge between
4668 blocks A and B if necessary by emitting a nop. */
4669 if (!optimize
4670 && !forward_edge_locus
4671 && !DECL_IGNORED_P (current_function_decl))
4672 emit_nop_for_unique_locus_between (a, b);
4673
4674 /* Move things from b->footer after a->footer. */
4675 if (BB_FOOTER (b))
4676 {
4677 if (!BB_FOOTER (a))
4678 BB_FOOTER (a) = BB_FOOTER (b);
4679 else
4680 {
4681 rtx_insn *last = BB_FOOTER (a);
4682
4683 while (NEXT_INSN (last))
4684 last = NEXT_INSN (last);
4685 SET_NEXT_INSN (last) = BB_FOOTER (b);
4686 SET_PREV_INSN (BB_FOOTER (b)) = last;
4687 }
4688 BB_FOOTER (b) = NULL;
4689 }
4690
4691 /* Move things from b->header before a->footer.
4692 Note that this may include dead tablejump data, but we don't clean
4693 those up until we go out of cfglayout mode. */
4694 if (BB_HEADER (b))
4695 {
4696 if (! BB_FOOTER (a))
4697 BB_FOOTER (a) = BB_HEADER (b);
4698 else
4699 {
4700 rtx_insn *last = BB_HEADER (b);
4701
4702 while (NEXT_INSN (last))
4703 last = NEXT_INSN (last);
4704 SET_NEXT_INSN (last) = BB_FOOTER (a);
4705 SET_PREV_INSN (BB_FOOTER (a)) = last;
4706 BB_FOOTER (a) = BB_HEADER (b);
4707 }
4708 BB_HEADER (b) = NULL;
4709 }
4710
4711 /* In the case basic blocks are not adjacent, move them around. */
4712 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4713 {
4714 insn = unlink_insn_chain (BB_HEAD (b), BB_END (b));
4715
4716 emit_insn_after_noloc (insn, BB_END (a), a);
4717 }
4718 /* Otherwise just re-associate the instructions. */
4719 else
4720 {
4721 insn = BB_HEAD (b);
4722 BB_END (a) = BB_END (b);
4723 }
4724
4725 /* emit_insn_after_noloc doesn't call df_insn_change_bb.
4726 We need to call it explicitly. */
4727 update_bb_for_insn_chain (insn, BB_END (b), a);
4728
4729 /* Skip possible DELETED_LABEL insn. */
4730 if (!NOTE_INSN_BASIC_BLOCK_P (insn))
4731 insn = NEXT_INSN (insn);
4732 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4733 BB_HEAD (b) = BB_END (b) = NULL;
4734 delete_insn (insn);
4735
4736 df_bb_delete (b->index);
4737
4738 if (forward_edge_locus)
4739 EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
4740
4741 if (dump_file)
4742 fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
4743 }
4744
4745 /* Split edge E. */
4746
4747 static basic_block
4748 cfg_layout_split_edge (edge e)
4749 {
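/* The new block needs no insns of its own; place it right after E->src
   in the insn stream and give it the partition of E->dest (or of E->src
   when E->dest is the exit block).  */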
4750 basic_block new_bb =
4751 create_basic_block (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4752 ? NEXT_INSN (BB_END (e->src)) : get_insns (),
4753 NULL_RTX, e->src);
4754
4755 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4756 BB_COPY_PARTITION (new_bb, e->src);
4757 else
4758 BB_COPY_PARTITION (new_bb, e->dest);
4759 make_edge (new_bb, e->dest, EDGE_FALLTHRU);
4760 redirect_edge_and_branch_force (e, new_bb);
4761
4762 return new_bb;
4763 }
4764
4765 /* Do postprocessing after making a forwarder block joined by edge FALLTHRU. */
4766
4767 static void
4768 rtl_make_forwarder_block (edge fallthru ATTRIBUTE_UNUSED)
4769 {
4770 }
4771
4772 /* Return true if BB contains only labels or non-executable
4773 instructions. */
4774
4775 static bool
4776 rtl_block_empty_p (basic_block bb)
4777 {
4778 rtx_insn *insn;
4779
4780 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4781 || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4782 return true;
4783
4784 FOR_BB_INSNS (bb, insn)
4785 if (NONDEBUG_INSN_P (insn) && !any_uncondjump_p (insn))
4786 return false;
4787
4788 return true;
4789 }
4790
4791 /* Split a basic block if it ends with a conditional branch and if
4792 the other part of the block is not empty. */
4793
4794 static basic_block
4795 rtl_split_block_before_cond_jump (basic_block bb)
4796 {
4797 rtx_insn *insn;
4798 rtx_insn *split_point = NULL;
4799 rtx_insn *last = NULL;
4800 bool found_code = false;
4801
4802 FOR_BB_INSNS (bb, insn)
4803 {
4804 if (any_condjump_p (insn))
4805 split_point = last;
4806 else if (NONDEBUG_INSN_P (insn))
4807 found_code = true;
4808 last = insn;
4809 }
4810
4811 /* Split only if we found both a conditional jump and some other real code. */
4812 if (found_code && split_point)
4813 return split_block (bb, split_point)->dest;
4814 else
4815 return NULL;
4816 }
4817
4818 /* Return true if BB ends with a call, possibly followed by some
4819 instructions that must stay with the call, false otherwise. */
4820
4821 static bool
4822 rtl_block_ends_with_call_p (basic_block bb)
4823 {
4824 rtx_insn *insn = BB_END (bb);
4825
4826 while (!CALL_P (insn)
4827 && insn != BB_HEAD (bb)
4828 && (keep_with_call_p (insn)
4829 || NOTE_P (insn)
4830 || DEBUG_INSN_P (insn)))
4831 insn = PREV_INSN (insn);
4832 return (CALL_P (insn));
4833 }
4834
4835 /* Return true if BB ends with a conditional branch, false otherwise. */
4836
4837 static bool
4838 rtl_block_ends_with_condjump_p (const_basic_block bb)
4839 {
4840 return any_condjump_p (BB_END (bb));
4841 }
4842
4843 /* Return true if we need to add fake edge to exit.
4844 Helper function for rtl_flow_call_edges_add. */
4845
4846 static bool
4847 need_fake_edge_p (const rtx_insn *insn)
4848 {
4849 if (!INSN_P (insn))
4850 return false;
4851
4852 if ((CALL_P (insn)
4853 && !SIBLING_CALL_P (insn)
4854 && !find_reg_note (insn, REG_NORETURN, NULL)
4855 && !(RTL_CONST_OR_PURE_CALL_P (insn))))
4856 return true;
4857
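/* Volatile asms (plain, inside a PARALLEL, or old-style ASM_INPUT) are
   handled like such calls, since entering them does not guarantee that
   the rest of the block will be executed.  */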
4858 return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS
4859 && MEM_VOLATILE_P (PATTERN (insn)))
4860 || (GET_CODE (PATTERN (insn)) == PARALLEL
4861 && asm_noperands (insn) != -1
4862 && MEM_VOLATILE_P (XVECEXP (PATTERN (insn), 0, 0)))
4863 || GET_CODE (PATTERN (insn)) == ASM_INPUT);
4864 }
4865
4866 /* Add fake edges to the function exit for any non-constant and non-noreturn
4867 calls, or volatile inline assembly, in the bitmap of blocks specified by
4868 BLOCKS, or in the whole CFG if BLOCKS is zero. Return the number of blocks
4869 that were split.
4870
4871 The goal is to expose cases in which entering a basic block does not imply
4872 that all subsequent instructions must be executed. */
4873
4874 static int
4875 rtl_flow_call_edges_add (sbitmap blocks)
4876 {
4877 int i;
4878 int blocks_split = 0;
4879 int last_bb = last_basic_block_for_fn (cfun);
4880 bool check_last_block = false;
4881
4882 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
4883 return 0;
4884
4885 if (! blocks)
4886 check_last_block = true;
4887 else
4888 check_last_block = bitmap_bit_p (blocks,
4889 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb->index);
4890
4891 /* In the last basic block, before epilogue generation, there will be
4892 a fallthru edge to EXIT. Special care is required if the last insn
4893 of the last basic block is a call because make_edge folds duplicate
4894 edges, which would result in the fallthru edge also being marked
4895 fake, which would result in the fallthru edge being removed by
4896 remove_fake_edges, which would result in an invalid CFG.
4897
4898 Moreover, we can't elide the outgoing fake edge, since the block
4899 profiler needs to take this into account in order to solve the minimal
4900 spanning tree in the case that the call doesn't return.
4901
4902 Handle this by adding a dummy instruction in a new last basic block. */
4903 if (check_last_block)
4904 {
4905 basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
4906 rtx_insn *insn = BB_END (bb);
4907
4908 /* Back up past insns that must be kept in the same block as a call. */
4909 while (insn != BB_HEAD (bb)
4910 && keep_with_call_p (insn))
4911 insn = PREV_INSN (insn);
4912
4913 if (need_fake_edge_p (insn))
4914 {
4915 edge e;
4916
4917 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
4918 if (e)
4919 {
4920 insert_insn_on_edge (gen_use (const0_rtx), e);
4921 commit_edge_insertions ();
4922 }
4923 }
4924 }
4925
4926 /* Now add fake edges to the function exit for any non-constant
4927 calls, since there is no way that we can determine if they will
4928 return or not... */
4929
4930 for (i = NUM_FIXED_BLOCKS; i < last_bb; i++)
4931 {
4932 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
4933 rtx_insn *insn;
4934 rtx_insn *prev_insn;
4935
4936 if (!bb)
4937 continue;
4938
4939 if (blocks && !bitmap_bit_p (blocks, i))
4940 continue;
4941
4942 for (insn = BB_END (bb); ; insn = prev_insn)
4943 {
4944 prev_insn = PREV_INSN (insn);
4945 if (need_fake_edge_p (insn))
4946 {
4947 edge e;
4948 rtx_insn *split_at_insn = insn;
4949
4950 /* Don't split the block between a call and an insn that should
4951 remain in the same block as the call. */
4952 if (CALL_P (insn))
4953 while (split_at_insn != BB_END (bb)
4954 && keep_with_call_p (NEXT_INSN (split_at_insn)))
4955 split_at_insn = NEXT_INSN (split_at_insn);
4956
4957 /* The handling above of the final block before the epilogue
4958 should be enough to verify that there is no edge to the exit
4959 block in CFG already. Calling make_edge in such case would
4960 cause us to mark that edge as fake and remove it later. */
4961
4962 if (flag_checking && split_at_insn == BB_END (bb))
4963 {
4964 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
4965 gcc_assert (e == NULL);
4966 }
4967
4968 /* Note that the following may create a new basic block
4969 and renumber the existing basic blocks. */
4970 if (split_at_insn != BB_END (bb))
4971 {
4972 e = split_block (bb, split_at_insn);
4973 if (e)
4974 blocks_split++;
4975 }
4976
4977 edge ne = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
4978 ne->probability = profile_probability::guessed_never ();
4979 }
4980
4981 if (insn == BB_HEAD (bb))
4982 break;
4983 }
4984 }
4985
4986 if (blocks_split)
4987 verify_flow_info ();
4988
4989 return blocks_split;
4990 }
4991
4992 /* Add COMP_RTX as a condition at the end of COND_BB. FIRST_HEAD is
4993 the conditional branch target, SECOND_HEAD should be the fall-thru;
4994 there is no need to handle it here as the loop versioning code does.
4995 The reason for SECOND_HEAD is that it is needed for the condition
4996 in trees, and this should be of the same type since it is a hook. */
4997 static void
4998 rtl_lv_add_condition_to_bb (basic_block first_head ,
4999 basic_block second_head ATTRIBUTE_UNUSED,
5000 basic_block cond_bb, void *comp_rtx)
5001 {
5002 rtx_code_label *label;
5003 rtx_insn *seq, *jump;
5004 rtx op0 = XEXP ((rtx)comp_rtx, 0);
5005 rtx op1 = XEXP ((rtx)comp_rtx, 1);
5006 enum rtx_code comp = GET_CODE ((rtx)comp_rtx);
5007 machine_mode mode;
5008
5009
5010 label = block_label (first_head);
5011 mode = GET_MODE (op0);
5012 if (mode == VOIDmode)
5013 mode = GET_MODE (op1);
5014
5015 start_sequence ();
5016 op0 = force_operand (op0, NULL_RTX);
5017 op1 = force_operand (op1, NULL_RTX);
5018 do_compare_rtx_and_jump (op0, op1, comp, 0, mode, NULL_RTX, NULL, label,
5019 profile_probability::uninitialized ());
5020 jump = get_last_insn ();
5021 JUMP_LABEL (jump) = label;
5022 LABEL_NUSES (label)++;
5023 seq = get_insns ();
5024 end_sequence ();
5025
5026 /* Add the new cond, in the new head. */
5027 emit_insn_after (seq, BB_END (cond_bb));
5028 }
5029
5030
5031 /* Given a block B with a conditional branch at its end, store the
5032 branch edge in *BRANCH_EDGE and the fall-thru edge in
5033 *FALLTHRU_EDGE. */
5034 static void
5035 rtl_extract_cond_bb_edges (basic_block b, edge *branch_edge,
5036 edge *fallthru_edge)
5037 {
5038 edge e = EDGE_SUCC (b, 0);
5039
5040 if (e->flags & EDGE_FALLTHRU)
5041 {
5042 *fallthru_edge = e;
5043 *branch_edge = EDGE_SUCC (b, 1);
5044 }
5045 else
5046 {
5047 *branch_edge = e;
5048 *fallthru_edge = EDGE_SUCC (b, 1);
5049 }
5050 }
5051
5052 void
5053 init_rtl_bb_info (basic_block bb)
5054 {
5055 gcc_assert (!bb->il.x.rtl);
5056 bb->il.x.head_ = NULL;
5057 bb->il.x.rtl = ggc_cleared_alloc<rtl_bb_info> ();
5058 }
5059
5060 /* Returns true if it is possible to remove edge E by redirecting
5061 it to the destination of the other edge from E->src. */
5062
5063 static bool
5064 rtl_can_remove_branch_p (const_edge e)
5065 {
5066 const_basic_block src = e->src;
5067 const_basic_block target = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest;
5068 const rtx_insn *insn = BB_END (src);
5069 rtx set;
5070
5071 /* The conditions are taken from try_redirect_by_replacing_jump. */
5072 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
5073 return false;
5074
5075 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
5076 return false;
5077
5078 if (BB_PARTITION (src) != BB_PARTITION (target))
5079 return false;
5080
5081 if (!onlyjump_p (insn)
5082 || tablejump_p (insn, NULL, NULL))
5083 return false;
5084
5085 set = single_set (insn);
5086 if (!set || side_effects_p (set))
5087 return false;
5088
5089 return true;
5090 }
5091
5092 static basic_block
5093 rtl_duplicate_bb (basic_block bb, copy_bb_data *id)
5094 {
5095 bb = cfg_layout_duplicate_bb (bb, id);
5096 bb->aux = NULL;
5097 return bb;
5098 }
5099
5100 /* Do book-keeping of basic block BB for the profile consistency checker.
5101 Store the counts in RECORD. */
5102 static void
5103 rtl_account_profile_record (basic_block bb, struct profile_record *record)
5104 {
5105 rtx_insn *insn;
5106 FOR_BB_INSNS (bb, insn)
5107 if (INSN_P (insn))
5108 {
5109 record->size += insn_cost (insn, false);
5110 if (bb->count.initialized_p ())
5111 record->time
5112 += insn_cost (insn, true) * bb->count.to_gcov_type ();
5113 else if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
5114 record->time
5115 += insn_cost (insn, true) * bb->count.to_frequency (cfun);
5116 }
5117 }
5118
5119 /* Implementation of CFG manipulation for linearized RTL. */
5120 struct cfg_hooks rtl_cfg_hooks = {
5121 "rtl",
5122 rtl_verify_flow_info,
5123 rtl_dump_bb,
5124 rtl_dump_bb_for_graph,
5125 rtl_create_basic_block,
5126 rtl_redirect_edge_and_branch,
5127 rtl_redirect_edge_and_branch_force,
5128 rtl_can_remove_branch_p,
5129 rtl_delete_block,
5130 rtl_split_block,
5131 rtl_move_block_after,
5132 rtl_can_merge_blocks, /* can_merge_blocks_p */
5133 rtl_merge_blocks,
5134 rtl_predict_edge,
5135 rtl_predicted_by_p,
5136 cfg_layout_can_duplicate_bb_p,
5137 rtl_duplicate_bb,
5138 rtl_split_edge,
5139 rtl_make_forwarder_block,
5140 rtl_tidy_fallthru_edge,
5141 rtl_force_nonfallthru,
5142 rtl_block_ends_with_call_p,
5143 rtl_block_ends_with_condjump_p,
5144 rtl_flow_call_edges_add,
5145 NULL, /* execute_on_growing_pred */
5146 NULL, /* execute_on_shrinking_pred */
5147 NULL, /* duplicate loop for trees */
5148 NULL, /* lv_add_condition_to_bb */
5149 NULL, /* lv_adjust_loop_header_phi*/
5150 NULL, /* extract_cond_bb_edges */
5151 NULL, /* flush_pending_stmts */
5152 rtl_block_empty_p, /* block_empty_p */
5153 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5154 rtl_account_profile_record,
5155 };
5156
5157 /* Implementation of CFG manipulation for cfg layout RTL, where
5158 basic blocks connected via fallthru edges do not have to be adjacent.
5159 This representation will hopefully become the default one in a future
5160 version of the compiler. */
5161
5162 struct cfg_hooks cfg_layout_rtl_cfg_hooks = {
5163 "cfglayout mode",
5164 rtl_verify_flow_info_1,
5165 rtl_dump_bb,
5166 rtl_dump_bb_for_graph,
5167 cfg_layout_create_basic_block,
5168 cfg_layout_redirect_edge_and_branch,
5169 cfg_layout_redirect_edge_and_branch_force,
5170 rtl_can_remove_branch_p,
5171 cfg_layout_delete_block,
5172 cfg_layout_split_block,
5173 rtl_move_block_after,
5174 cfg_layout_can_merge_blocks_p,
5175 cfg_layout_merge_blocks,
5176 rtl_predict_edge,
5177 rtl_predicted_by_p,
5178 cfg_layout_can_duplicate_bb_p,
5179 cfg_layout_duplicate_bb,
5180 cfg_layout_split_edge,
5181 rtl_make_forwarder_block,
5182 NULL, /* tidy_fallthru_edge */
5183 rtl_force_nonfallthru,
5184 rtl_block_ends_with_call_p,
5185 rtl_block_ends_with_condjump_p,
5186 rtl_flow_call_edges_add,
5187 NULL, /* execute_on_growing_pred */
5188 NULL, /* execute_on_shrinking_pred */
5189 duplicate_loop_to_header_edge, /* duplicate loop for trees */
5190 rtl_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
5191 NULL, /* lv_adjust_loop_header_phi*/
5192 rtl_extract_cond_bb_edges, /* extract_cond_bb_edges */
5193 NULL, /* flush_pending_stmts */
5194 rtl_block_empty_p, /* block_empty_p */
5195 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5196 rtl_account_profile_record,
5197 };
5198
5199 #include "gt-cfgrtl.h"
5200