/* Control flow graph manipulation code for GNU compiler.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains low level functions to manipulate the CFG and analyze it
   that are aware of the RTL intermediate language.

   Available functionality:
     - Basic CFG/RTL manipulation API documented in cfghooks.h
     - CFG-aware instruction chain manipulation
         delete_insn, delete_insn_chain
     - Edge splitting and committing to edges
         insert_insn_on_edge, commit_edge_insertions
     - CFG updating after insn simplification
         purge_dead_edges, purge_all_dead_edges
     - CFG fixing after coarse manipulation
         fixup_abnormal_edges

   Functions not meant for generic use:
     - Infrastructure to quickly determine the basic block for an insn
         compute_bb_for_insn, update_bb_for_insn, set_block_for_insn,
     - Edge redirection with updating and optimizing of insn chain
         block_label, tidy_fallthru_edge, force_nonfallthru  */
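
/* A typical use of the edge insertion API listed above: build an insn
   sequence, queue it on an edge, and later materialize all pending
   insertions at once.  A minimal sketch only (DEST, SRC and E stand for
   caller-provided operands and a CFG edge):

     start_sequence ();
     emit_move_insn (dest, src);
     rtx_insn *seq = get_insns ();
     end_sequence ();
     insert_insn_on_edge (seq, e);
     ...
     commit_edge_insertions ();  */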

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "df.h"
#include "insn-config.h"
#include "memmodel.h"
#include "emit-rtl.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "bb-reorder.h"
#include "rtl-error.h"
#include "insn-attr.h"
#include "dojump.h"
#include "expr.h"
#include "cfgloop.h"
#include "tree-pass.h"
#include "print-rtl.h"

/* Holds the interesting leading and trailing notes for the function.
   Only applicable if the CFG is in cfglayout mode.  */
static GTY(()) rtx_insn *cfg_layout_function_footer;
static GTY(()) rtx_insn *cfg_layout_function_header;

static rtx_insn *skip_insns_after_block (basic_block);
static void record_effective_endpoints (void);
static void fixup_reorder_chain (void);

void verify_insn_chain (void);
static void fixup_fallthru_exit_predecessor (void);
static int can_delete_note_p (const rtx_note *);
static int can_delete_label_p (const rtx_code_label *);
static basic_block rtl_split_edge (edge);
static bool rtl_move_block_after (basic_block, basic_block);
static int rtl_verify_flow_info (void);
static basic_block cfg_layout_split_block (basic_block, void *);
static edge cfg_layout_redirect_edge_and_branch (edge, basic_block);
static basic_block cfg_layout_redirect_edge_and_branch_force (edge,
							      basic_block);
static void cfg_layout_delete_block (basic_block);
static void rtl_delete_block (basic_block);
static basic_block rtl_redirect_edge_and_branch_force (edge, basic_block);
static edge rtl_redirect_edge_and_branch (edge, basic_block);
static basic_block rtl_split_block (basic_block, void *);
static void rtl_dump_bb (FILE *, basic_block, int, dump_flags_t);
static int rtl_verify_flow_info_1 (void);
static void rtl_make_forwarder_block (edge);

/* Return true if NOTE is not one of the ones that must be kept paired,
   so that we may simply delete it.  */

static int
can_delete_note_p (const rtx_note *note)
{
  switch (NOTE_KIND (note))
    {
    case NOTE_INSN_DELETED:
    case NOTE_INSN_BASIC_BLOCK:
    case NOTE_INSN_EPILOGUE_BEG:
      return true;

    default:
      return false;
    }
}

/* True if a given label can be deleted.  */

static int
can_delete_label_p (const rtx_code_label *label)
{
  return (!LABEL_PRESERVE_P (label)
	  /* User declared labels must be preserved.  */
	  && LABEL_NAME (label) == 0
	  && !vec_safe_contains<rtx_insn *> (forced_labels,
					     const_cast<rtx_code_label *> (label)));
}

/* Delete INSN by patching it out.  */

void
delete_insn (rtx_insn *insn)
{
  rtx note;
  bool really_delete = true;

  if (LABEL_P (insn))
    {
      /* Some labels can't be directly removed from the INSN chain, as they
	 might be referenced via variables, the constant pool etc.
	 Convert them to the special NOTE_INSN_DELETED_LABEL note.  */
      if (! can_delete_label_p (as_a <rtx_code_label *> (insn)))
	{
	  const char *name = LABEL_NAME (insn);
	  basic_block bb = BLOCK_FOR_INSN (insn);
	  rtx_insn *bb_note = NEXT_INSN (insn);

	  really_delete = false;
	  PUT_CODE (insn, NOTE);
	  NOTE_KIND (insn) = NOTE_INSN_DELETED_LABEL;
	  NOTE_DELETED_LABEL_NAME (insn) = name;

	  /* If the note following the label starts a basic block, and the
	     label is a member of the same basic block, interchange the two.  */
	  if (bb_note != NULL_RTX
	      && NOTE_INSN_BASIC_BLOCK_P (bb_note)
	      && bb != NULL
	      && bb == BLOCK_FOR_INSN (bb_note))
	    {
	      reorder_insns_nobb (insn, insn, bb_note);
	      BB_HEAD (bb) = bb_note;
	      if (BB_END (bb) == bb_note)
		BB_END (bb) = insn;
	    }
	}

      remove_node_from_insn_list (insn, &nonlocal_goto_handler_labels);
    }

  if (really_delete)
    {
      /* If this insn has already been deleted, something is very wrong.  */
      gcc_assert (!insn->deleted ());
      if (INSN_P (insn))
	df_insn_delete (insn);
      remove_insn (insn);
      insn->set_deleted ();
    }

  /* If deleting a jump, decrement the use count of the label.  Deleting
     the label itself should happen in the normal course of block merging.  */
  if (JUMP_P (insn))
    {
      if (JUMP_LABEL (insn)
	  && LABEL_P (JUMP_LABEL (insn)))
	LABEL_NUSES (JUMP_LABEL (insn))--;

      /* If there are more targets, remove them too.  */
      while ((note
	      = find_reg_note (insn, REG_LABEL_TARGET, NULL_RTX)) != NULL_RTX
	     && LABEL_P (XEXP (note, 0)))
	{
	  LABEL_NUSES (XEXP (note, 0))--;
	  remove_note (insn, note);
	}
    }

  /* Also if deleting any insn that references a label as an operand.  */
  while ((note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX)) != NULL_RTX
	 && LABEL_P (XEXP (note, 0)))
    {
      LABEL_NUSES (XEXP (note, 0))--;
      remove_note (insn, note);
    }

  if (rtx_jump_table_data *table = dyn_cast <rtx_jump_table_data *> (insn))
    {
      rtvec vec = table->get_labels ();
      int len = GET_NUM_ELEM (vec);
      int i;

      for (i = 0; i < len; i++)
	{
	  rtx label = XEXP (RTVEC_ELT (vec, i), 0);

	  /* When deleting code in bulk (e.g. removing many unreachable
	     blocks) we can delete a label that's a target of the vector
	     before deleting the vector itself.  */
	  if (!NOTE_P (label))
	    LABEL_NUSES (label)--;
	}
    }
}

/* Like delete_insn but also purge dead edges from BB.
   Return true if any edges are eliminated.  */

bool
delete_insn_and_edges (rtx_insn *insn)
{
  bool purge = false;

  if (INSN_P (insn)
      && BLOCK_FOR_INSN (insn)
      && BB_END (BLOCK_FOR_INSN (insn)) == insn)
    purge = true;
  delete_insn (insn);
  if (purge)
    return purge_dead_edges (BLOCK_FOR_INSN (insn));
  return false;
}

/* Unlink a chain of insns between START and FINISH, leaving notes
   that must be paired.  If CLEAR_BB is true, we set the bb field to NULL
   for insns that cannot be removed.  */

void
delete_insn_chain (rtx start, rtx_insn *finish, bool clear_bb)
{
  /* Unchain the insns one by one.  It would be quicker to delete all of these
     with a single unchaining, rather than one at a time, but we need to keep
     the NOTEs.  */
  rtx_insn *current = finish;
  while (1)
    {
      rtx_insn *prev = PREV_INSN (current);
      if (NOTE_P (current) && !can_delete_note_p (as_a <rtx_note *> (current)))
	;
      else
	delete_insn (current);

      if (clear_bb && !current->deleted ())
	set_block_for_insn (current, NULL);

      if (current == start)
	break;
      current = prev;
    }
}

/* Create a new basic block consisting of the instructions between HEAD and
   END inclusive.  This function is designed to allow fast BB construction:
   it reuses the note and basic block struct in BB_NOTE, if any, does not
   grow the BASIC_BLOCK chain, and should be used directly only by CFG
   construction code.  END can be NULL to create a new empty basic block
   before HEAD.  Both END and HEAD can be NULL to create a basic block at
   the end of the INSN chain.  AFTER is the basic block we should be put
   after.  */

basic_block
create_basic_block_structure (rtx_insn *head, rtx_insn *end, rtx_note *bb_note,
			      basic_block after)
{
  basic_block bb;

  if (bb_note
      && (bb = NOTE_BASIC_BLOCK (bb_note)) != NULL
      && bb->aux == NULL)
    {
      /* If we found an existing note, thread it back onto the chain.  */

      rtx_insn *after;

      if (LABEL_P (head))
	after = head;
      else
	{
	  after = PREV_INSN (head);
	  head = bb_note;
	}

      if (after != bb_note && NEXT_INSN (after) != bb_note)
	reorder_insns_nobb (bb_note, bb_note, after);
    }
  else
    {
      /* Otherwise we must create a note and a basic block structure.  */

      bb = alloc_block ();

      init_rtl_bb_info (bb);
      if (!head && !end)
	head = end = bb_note
	  = emit_note_after (NOTE_INSN_BASIC_BLOCK, get_last_insn ());
      else if (LABEL_P (head) && end)
	{
	  bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, head);
	  if (head == end)
	    end = bb_note;
	}
      else
	{
	  bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, head);
	  head = bb_note;
	  if (!end)
	    end = head;
	}

      NOTE_BASIC_BLOCK (bb_note) = bb;
    }

  /* Always include the bb note in the block.  */
  if (NEXT_INSN (end) == bb_note)
    end = bb_note;

  BB_HEAD (bb) = head;
  BB_END (bb) = end;
  bb->index = last_basic_block_for_fn (cfun)++;
  bb->flags = BB_NEW | BB_RTL;
  link_block (bb, after);
  SET_BASIC_BLOCK_FOR_FN (cfun, bb->index, bb);
  df_bb_refs_record (bb->index, false);
  update_bb_for_insn (bb);
  BB_SET_PARTITION (bb, BB_UNPARTITIONED);

  /* Tag the block so that we know it has been used when considering
     other basic block notes.  */
  bb->aux = bb;

  return bb;
}

/* Create a new basic block consisting of the instructions between HEAD and
   END and place it in the BB chain after block AFTER.  END can be NULL to
   create a new empty basic block before HEAD.  Both END and HEAD can be
   NULL to create a basic block at the end of the INSN chain.  */

static basic_block
rtl_create_basic_block (void *headp, void *endp, basic_block after)
{
  rtx_insn *head = (rtx_insn *) headp;
  rtx_insn *end = (rtx_insn *) endp;
  basic_block bb;

  /* Grow the basic block array if needed.  */
  if ((size_t) last_basic_block_for_fn (cfun)
      >= basic_block_info_for_fn (cfun)->length ())
    {
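      /* Grow by roughly 25% (rounding up), so that repeated block
	 creation has amortized constant cost.  */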
      size_t new_size =
	(last_basic_block_for_fn (cfun)
	 + (last_basic_block_for_fn (cfun) + 3) / 4);
      vec_safe_grow_cleared (basic_block_info_for_fn (cfun), new_size);
    }

  n_basic_blocks_for_fn (cfun)++;

  bb = create_basic_block_structure (head, end, NULL, after);
  bb->aux = NULL;
  return bb;
}

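/* Create a basic block in cfglayout mode; a thin wrapper around
   rtl_create_basic_block.  */
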
static basic_block
cfg_layout_create_basic_block (void *head, void *end, basic_block after)
{
  basic_block newbb = rtl_create_basic_block (head, end, after);

  return newbb;
}

/* Delete the insns in a (non-live) block.  We physically delete every
   non-deleted-note insn, and update the flow graph appropriately.  */

/* ??? Preserving all such notes strikes me as wrong.  It would be nice
   to post-process the stream to remove empty blocks, loops, ranges, etc.  */

static void
rtl_delete_block (basic_block b)
{
  rtx_insn *insn, *end;

  /* If the head of this block is a CODE_LABEL, then it might be the
     label for an exception handler which can't be reached.  We need
     to remove the label from the exception_handler_label list.  */
  insn = BB_HEAD (b);

  end = get_last_bb_insn (b);

  /* Selectively delete the entire chain.  */
  BB_HEAD (b) = NULL;
  delete_insn_chain (insn, end, true);

  if (dump_file)
    fprintf (dump_file, "deleting block %d\n", b->index);
  df_bb_delete (b->index);
}

/* Records the basic block struct in BLOCK_FOR_INSN for every insn.  */

void
compute_bb_for_insn (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *end = BB_END (bb);
      rtx_insn *insn;

      for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
	{
	  BLOCK_FOR_INSN (insn) = bb;
	  if (insn == end)
	    break;
	}
    }
}

/* Release the basic_block_for_insn array.  */

unsigned int
free_bb_for_insn (void)
{
  rtx_insn *insn;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (!BARRIER_P (insn))
      BLOCK_FOR_INSN (insn) = NULL;
  return 0;
}

namespace {

const pass_data pass_data_free_cfg =
{
  RTL_PASS, /* type */
  "*free_cfg", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  PROP_cfg, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_free_cfg : public rtl_opt_pass
{
public:
  pass_free_cfg (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_free_cfg, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_free_cfg

unsigned int
pass_free_cfg::execute (function *)
{
  /* The resource.c machinery uses DF but the CFG isn't guaranteed to be
     valid at that point so it would be too late to call df_analyze.  */
  if (DELAY_SLOTS && optimize > 0 && flag_delayed_branch)
    {
      df_note_add_problem ();
      df_analyze ();
    }

  if (crtl->has_bb_partition)
    insert_section_boundary_note ();

  free_bb_for_insn ();
  return 0;
}

} // anon namespace

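/* Create an instance of the pass_free_cfg pass.  */
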
rtl_opt_pass *
make_pass_free_cfg (gcc::context *ctxt)
{
  return new pass_free_cfg (ctxt);
}

/* Return the insn after which code should be emitted when we want to emit
   code at the entry of the function.  */
rtx_insn *
entry_of_function (void)
{
  return (n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS ?
	  BB_HEAD (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb) : get_insns ());
}

/* Emit INSN at the entry point of the function, ensuring that it is only
   executed once per function.  */
void
emit_insn_at_entry (rtx insn)
{
  edge_iterator ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
  edge e = ei_safe_edge (ei);
  gcc_assert (e->flags & EDGE_FALLTHRU);

  insert_insn_on_edge (insn, e);
  commit_edge_insertions ();
}

/* Update BLOCK_FOR_INSN of insns between BEGIN and END
   (skipping any BARRIERs) and notify df of the bb change.
   The insn chain range is inclusive
   (i.e. both BEGIN and END will be updated).  */

static void
update_bb_for_insn_chain (rtx_insn *begin, rtx_insn *end, basic_block bb)
{
  rtx_insn *insn;

  end = NEXT_INSN (end);
  for (insn = begin; insn != end; insn = NEXT_INSN (insn))
    if (!BARRIER_P (insn))
      df_insn_change_bb (insn, bb);
}

/* Update BLOCK_FOR_INSN of insns in BB to BB,
   and notify df of the change.  */

void
update_bb_for_insn (basic_block bb)
{
  update_bb_for_insn_chain (BB_HEAD (bb), BB_END (bb), bb);
}

/* Like active_insn_p, except keep the return value clobber around
   even after reload.  */

static bool
flow_active_insn_p (const rtx_insn *insn)
{
  if (active_insn_p (insn))
    return true;

  /* A clobber of the function return value exists for buggy
     programs that fail to return a value.  Its effect is to
     keep the return value from being live across the entire
     function.  If we allow it to be skipped, we introduce the
     possibility for register lifetime confusion.  */
  if (GET_CODE (PATTERN (insn)) == CLOBBER
      && REG_P (XEXP (PATTERN (insn), 0))
      && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))
    return true;

  return false;
}

/* Return true if the block has no effect and only forwards control flow to
   its single destination.  */

bool
contains_no_active_insn_p (const_basic_block bb)
{
  rtx_insn *insn;

  if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || !single_succ_p (bb)
      || (single_succ_edge (bb)->flags & EDGE_FAKE) != 0)
    return false;

  for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
    if (INSN_P (insn) && flow_active_insn_p (insn))
      return false;

  return (!INSN_P (insn)
	  || (JUMP_P (insn) && simplejump_p (insn))
	  || !flow_active_insn_p (insn));
}

/* Likewise, but protect loop latches, headers and preheaders.  */
/* FIXME: Make this a cfg hook.  */

bool
forwarder_block_p (const_basic_block bb)
{
  if (!contains_no_active_insn_p (bb))
    return false;

  /* Protect loop latches, headers and preheaders.  */
  if (current_loops)
    {
      basic_block dest;
      if (bb->loop_father->header == bb)
	return false;
      dest = EDGE_SUCC (bb, 0)->dest;
      if (dest->loop_father->header == dest)
	return false;
    }

  return true;
}

/* Return nonzero if we can reach target from src by falling through.  */
/* FIXME: Make this a cfg hook, the result is only valid in cfgrtl mode.  */

bool
can_fallthru (basic_block src, basic_block target)
{
  rtx_insn *insn = BB_END (src);
  rtx_insn *insn2;
  edge e;
  edge_iterator ei;

  if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return true;
  if (src->next_bb != target)
    return false;

  /* ??? Later we may add code to move jump tables offline.  */
  if (tablejump_p (insn, NULL, NULL))
    return false;

  FOR_EACH_EDGE (e, ei, src->succs)
    if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
	&& e->flags & EDGE_FALLTHRU)
      return false;

  insn2 = BB_HEAD (target);
  if (!active_insn_p (insn2))
    insn2 = next_active_insn (insn2);

  return next_active_insn (insn) == insn2;
}

/* Return nonzero if we could reach target from src by falling through,
   if the target was made adjacent.  If we already have a fall-through
   edge to the exit block, we can't do that.  */
static bool
could_fall_through (basic_block src, basic_block target)
{
  edge e;
  edge_iterator ei;

  if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return true;
  FOR_EACH_EDGE (e, ei, src->succs)
    if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
	&& e->flags & EDGE_FALLTHRU)
      return false;
  return true;
}

/* Return the NOTE_INSN_BASIC_BLOCK of BB.  */
rtx_note *
bb_note (basic_block bb)
{
  rtx_insn *note;

  note = BB_HEAD (bb);
  if (LABEL_P (note))
    note = NEXT_INSN (note);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  return as_a <rtx_note *> (note);
}

/* Return the INSN immediately following the NOTE_INSN_BASIC_BLOCK
   note associated with the BLOCK.  */

static rtx_insn *
first_insn_after_basic_block_note (basic_block block)
{
  rtx_insn *insn;

  /* Get the first instruction in the block.  */
  insn = BB_HEAD (block);

  if (insn == NULL_RTX)
    return NULL;
  if (LABEL_P (insn))
    insn = NEXT_INSN (insn);
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));

  return NEXT_INSN (insn);
}

/* Creates a new basic block just after basic block BB by splitting
   everything after specified instruction INSNP.  */

static basic_block
rtl_split_block (basic_block bb, void *insnp)
{
  basic_block new_bb;
  rtx_insn *insn = (rtx_insn *) insnp;
  edge e;
  edge_iterator ei;

  if (!insn)
    {
      insn = first_insn_after_basic_block_note (bb);

      if (insn)
	{
	  rtx_insn *next = insn;

	  insn = PREV_INSN (insn);

	  /* If the block contains only debug insns, insn would have
	     been NULL in a non-debug compilation, and then we'd end
	     up emitting a DELETED note.  For -fcompare-debug
	     stability, emit the note too.  */
	  if (insn != BB_END (bb)
	      && DEBUG_INSN_P (next)
	      && DEBUG_INSN_P (BB_END (bb)))
	    {
	      while (next != BB_END (bb) && DEBUG_INSN_P (next))
		next = NEXT_INSN (next);

	      if (next == BB_END (bb))
		emit_note_after (NOTE_INSN_DELETED, next);
	    }
	}
      else
	insn = get_last_insn ();
    }

  /* We probably should check type of the insn so that we do not create
     inconsistent cfg.  It is checked in verify_flow_info anyway, so do not
     bother.  */
  if (insn == BB_END (bb))
    emit_note_after (NOTE_INSN_DELETED, insn);

  /* Create the new basic block.  */
  new_bb = create_basic_block (NEXT_INSN (insn), BB_END (bb), bb);
  BB_COPY_PARTITION (new_bb, bb);
  BB_END (bb) = insn;

  /* Redirect the outgoing edges.  */
  new_bb->succs = bb->succs;
  bb->succs = NULL;
  FOR_EACH_EDGE (e, ei, new_bb->succs)
    e->src = new_bb;

  /* The new block starts off being dirty.  */
  df_set_bb_dirty (bb);
  return new_bb;
}

/* Return true if the single edge between blocks A and B is the only place
   in RTL which holds some unique locus.  */

static bool
unique_locus_on_edge_between_p (basic_block a, basic_block b)
{
  const location_t goto_locus = EDGE_SUCC (a, 0)->goto_locus;
  rtx_insn *insn, *end;

  if (LOCATION_LOCUS (goto_locus) == UNKNOWN_LOCATION)
    return false;

  /* First scan block A backward.  */
  insn = BB_END (a);
  end = PREV_INSN (BB_HEAD (a));
  while (insn != end && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
    insn = PREV_INSN (insn);

  if (insn != end && INSN_LOCATION (insn) == goto_locus)
    return false;

  /* Then scan block B forward.  */
  insn = BB_HEAD (b);
  if (insn)
    {
      end = NEXT_INSN (BB_END (b));
      while (insn != end && !NONDEBUG_INSN_P (insn))
	insn = NEXT_INSN (insn);

      if (insn != end && INSN_HAS_LOCATION (insn)
	  && INSN_LOCATION (insn) == goto_locus)
	return false;
    }

  return true;
}

/* If the single edge between blocks A and B is the only place in RTL which
   holds some unique locus, emit a nop with that locus between the blocks.  */

static void
emit_nop_for_unique_locus_between (basic_block a, basic_block b)
{
  if (!unique_locus_on_edge_between_p (a, b))
    return;

  BB_END (a) = emit_insn_after_noloc (gen_nop (), BB_END (a), a);
  INSN_LOCATION (BB_END (a)) = EDGE_SUCC (a, 0)->goto_locus;
}

/* Blocks A and B are to be merged into a single block A.  The insns
   are already contiguous.  */

static void
rtl_merge_blocks (basic_block a, basic_block b)
{
  rtx_insn *b_head = BB_HEAD (b), *b_end = BB_END (b), *a_end = BB_END (a);
  rtx_insn *del_first = NULL, *del_last = NULL;
  rtx_insn *b_debug_start = b_end, *b_debug_end = b_end;
  bool forwarder_p = (b->flags & BB_FORWARDER_BLOCK) != 0;
  int b_empty = 0;

  if (dump_file)
    fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
	     a->index);

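  /* Skip over any trailing debug insns at the end of B: afterwards B_END
     is the last nondebug insn of B and B_DEBUG_START is the first insn of
     the trailing debug run (or stays at the old BB_END (b) if there is
     none).  */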
  while (DEBUG_INSN_P (b_end))
    b_end = PREV_INSN (b_debug_start = b_end);

  /* If there was a CODE_LABEL beginning B, delete it.  */
  if (LABEL_P (b_head))
    {
      /* Detect basic blocks with nothing but a label.  This can happen
	 in particular at the end of a function.  */
      if (b_head == b_end)
	b_empty = 1;

      del_first = del_last = b_head;
      b_head = NEXT_INSN (b_head);
    }

  /* Delete the basic block note and handle blocks containing just that
     note.  */
  if (NOTE_INSN_BASIC_BLOCK_P (b_head))
    {
      if (b_head == b_end)
	b_empty = 1;
      if (! del_last)
	del_first = b_head;

      del_last = b_head;
      b_head = NEXT_INSN (b_head);
    }

  /* If there was a jump out of A, delete it.  */
  if (JUMP_P (a_end))
    {
      rtx_insn *prev;

      for (prev = PREV_INSN (a_end); ; prev = PREV_INSN (prev))
	if (!NOTE_P (prev)
	    || NOTE_INSN_BASIC_BLOCK_P (prev)
	    || prev == BB_HEAD (a))
	  break;

      del_first = a_end;

      /* If this was a conditional jump, we need to also delete
	 the insn that set cc0.  */
      if (HAVE_cc0 && only_sets_cc0_p (prev))
	{
	  rtx_insn *tmp = prev;

	  prev = prev_nonnote_insn (prev);
	  if (!prev)
	    prev = BB_HEAD (a);
	  del_first = tmp;
	}

      a_end = PREV_INSN (del_first);
    }
  else if (BARRIER_P (NEXT_INSN (a_end)))
    del_first = NEXT_INSN (a_end);

  /* Delete everything marked above as well as crap that might be
     hanging out between the two blocks.  */
  BB_END (a) = a_end;
  BB_HEAD (b) = b_empty ? NULL : b_head;
  delete_insn_chain (del_first, del_last, true);

  /* When not optimizing and the edge is the only place in RTL which holds
     some unique locus, emit a nop with that locus in between.  */
  if (!optimize)
    {
      emit_nop_for_unique_locus_between (a, b);
      a_end = BB_END (a);
    }

  /* Reassociate the insns of B with A.  */
  if (!b_empty)
    {
      update_bb_for_insn_chain (a_end, b_debug_end, a);

      BB_END (a) = b_debug_end;
      BB_HEAD (b) = NULL;
    }
  else if (b_end != b_debug_end)
    {
      /* Move any deleted labels and other notes between the end of A
	 and the debug insns that make up B after the debug insns,
	 bringing the debug insns into A while keeping the notes after
	 the end of A.  */
      if (NEXT_INSN (a_end) != b_debug_start)
	reorder_insns_nobb (NEXT_INSN (a_end), PREV_INSN (b_debug_start),
			    b_debug_end);
      update_bb_for_insn_chain (b_debug_start, b_debug_end, a);
      BB_END (a) = b_debug_end;
    }

  df_bb_delete (b->index);

  /* If B was a forwarder block, propagate the locus on the edge.  */
  if (forwarder_p
      && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION)
    EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;

  if (dump_file)
    fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
}

/* Return true when blocks A and B can be merged.  */

static bool
rtl_can_merge_blocks (basic_block a, basic_block b)
{
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return false;

  /* Protect the loop latches.  */
  if (current_loops && b->loop_father->latch == b)
    return false;

  /* There must be exactly one edge in between the blocks.  */
  return (single_succ_p (a)
	  && single_succ (a) == b
	  && single_pred_p (b)
	  && a != b
	  /* Must be simple edge.  */
	  && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
	  && a->next_bb == b
	  && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
	  && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
	  /* If the jump insn has side effects,
	     we can't kill the edge.  */
	  && (!JUMP_P (BB_END (a))
	      || (reload_completed
		  ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
}

/* Return the label in the head of basic block BLOCK.  Create one if it doesn't
   exist.  */

rtx_code_label *
block_label (basic_block block)
{
  if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return NULL;

  if (!LABEL_P (BB_HEAD (block)))
    {
      BB_HEAD (block) = emit_label_before (gen_label_rtx (), BB_HEAD (block));
    }

  return as_a <rtx_code_label *> (BB_HEAD (block));
}

/* Remove all barriers from BB_FOOTER of a BB.  */

static void
remove_barriers_from_footer (basic_block bb)
{
  rtx_insn *insn = BB_FOOTER (bb);

  /* Remove barriers but keep jumptables.  */
  while (insn)
    {
      if (BARRIER_P (insn))
	{
	  if (PREV_INSN (insn))
	    SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
	  else
	    BB_FOOTER (bb) = NEXT_INSN (insn);
	  if (NEXT_INSN (insn))
	    SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
	}
      if (LABEL_P (insn))
	return;
      insn = NEXT_INSN (insn);
    }
}

/* Attempt to perform edge redirection by replacing a possibly complex jump
   instruction with an unconditional jump, or by removing the jump
   completely.  This can apply only if all edges now point to the same
   block.  The parameters and return values are equivalent to
   redirect_edge_and_branch.  */

edge
try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
{
  basic_block src = e->src;
  rtx_insn *insn = BB_END (src), *kill_from;
  rtx set;
  int fallthru = 0;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (src) != BB_PARTITION (target))
    return NULL;

  /* We can replace or remove a complex jump only when we have exactly
     two edges.  Also, if we have exactly one outgoing edge, we can
     redirect that.  */
  if (EDGE_COUNT (src->succs) >= 3
      /* Verify that all targets will be TARGET.  Specifically, the
	 edge that is not E must also go to TARGET.  */
      || (EDGE_COUNT (src->succs) == 2
	  && EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target))
    return NULL;

  if (!onlyjump_p (insn))
    return NULL;
  if ((!optimize || reload_completed) && tablejump_p (insn, NULL, NULL))
    return NULL;

  /* Avoid removing branch with side effects.  */
  set = single_set (insn);
  if (!set || side_effects_p (set))
    return NULL;

  /* In case we zap a conditional jump, we'll need to kill
     the cc0 setter too.  */
  kill_from = insn;
  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, PATTERN (insn))
      && only_sets_cc0_p (PREV_INSN (insn)))
    kill_from = PREV_INSN (insn);

  /* See if we can create the fallthru edge.  */
  if (in_cfglayout || can_fallthru (src, target))
    {
      if (dump_file)
	fprintf (dump_file, "Removing jump %i.\n", INSN_UID (insn));
      fallthru = 1;

      /* Selectively unlink whole insn chain.  */
      if (in_cfglayout)
	{
	  delete_insn_chain (kill_from, BB_END (src), false);
	  remove_barriers_from_footer (src);
	}
      else
	delete_insn_chain (kill_from, PREV_INSN (BB_HEAD (target)),
			   false);
    }

  /* If this already is simplejump, redirect it.  */
  else if (simplejump_p (insn))
    {
      if (e->dest == target)
	return NULL;
      if (dump_file)
	fprintf (dump_file, "Redirecting jump %i from %i to %i.\n",
		 INSN_UID (insn), e->dest->index, target->index);
      if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
			  block_label (target), 0))
	{
	  gcc_assert (target == EXIT_BLOCK_PTR_FOR_FN (cfun));
	  return NULL;
	}
    }

  /* Cannot do anything for target exit block.  */
  else if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return NULL;

  /* Or replace possibly complicated jump insn by simple jump insn.  */
  else
    {
      rtx_code_label *target_label = block_label (target);
      rtx_insn *barrier;
      rtx_insn *label;
      rtx_jump_table_data *table;

      emit_jump_insn_after_noloc (targetm.gen_jump (target_label), insn);
      JUMP_LABEL (BB_END (src)) = target_label;
      LABEL_NUSES (target_label)++;
      if (dump_file)
	fprintf (dump_file, "Replacing insn %i by jump %i\n",
		 INSN_UID (insn), INSN_UID (BB_END (src)));

      delete_insn_chain (kill_from, insn, false);

      /* Recognize a tablejump that we are converting to a
	 simple jump and remove its associated CODE_LABEL
	 and ADDR_VEC or ADDR_DIFF_VEC.  */
      if (tablejump_p (insn, &label, &table))
	delete_insn_chain (label, table, false);

      barrier = next_nonnote_nondebug_insn (BB_END (src));
      if (!barrier || !BARRIER_P (barrier))
	emit_barrier_after (BB_END (src));
      else
	{
	  if (barrier != NEXT_INSN (BB_END (src)))
	    {
	      /* Move the jump before barrier so that the notes
		 which originally were or were created before jump table are
		 inside the basic block.  */
	      rtx_insn *new_insn = BB_END (src);

	      update_bb_for_insn_chain (NEXT_INSN (BB_END (src)),
					PREV_INSN (barrier), src);

	      SET_NEXT_INSN (PREV_INSN (new_insn)) = NEXT_INSN (new_insn);
	      SET_PREV_INSN (NEXT_INSN (new_insn)) = PREV_INSN (new_insn);

	      SET_NEXT_INSN (new_insn) = barrier;
	      SET_NEXT_INSN (PREV_INSN (barrier)) = new_insn;

	      SET_PREV_INSN (new_insn) = PREV_INSN (barrier);
	      SET_PREV_INSN (barrier) = new_insn;
	    }
	}
    }

  /* Keep only one edge out and set proper flags.  */
  if (!single_succ_p (src))
    remove_edge (e);
  gcc_assert (single_succ_p (src));

  e = single_succ_edge (src);
  if (fallthru)
    e->flags = EDGE_FALLTHRU;
  else
    e->flags = 0;

  e->probability = profile_probability::always ();

  if (e->dest != target)
    redirect_edge_succ (e, target);
  return e;
}

/* Subroutine of redirect_branch_edge that tries to patch the jump
   instruction INSN so that it reaches block NEW_BB.  Do this only
   when INSN originally reached the block headed by OLD_LABEL.  Return
   true if this worked or the original target wasn't that block; return
   false if redirection doesn't work.  */

static bool
patch_jump_insn (rtx_insn *insn, rtx_insn *old_label, basic_block new_bb)
{
  rtx_jump_table_data *table;
  rtx tmp;
  /* Recognize a tablejump and adjust all matching cases.  */
  if (tablejump_p (insn, NULL, &table))
    {
      rtvec vec;
      int j;
      rtx_code_label *new_label = block_label (new_bb);

      if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
	return false;
      vec = table->get_labels ();

      for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j)
	if (XEXP (RTVEC_ELT (vec, j), 0) == old_label)
	  {
	    RTVEC_ELT (vec, j) = gen_rtx_LABEL_REF (Pmode, new_label);
	    --LABEL_NUSES (old_label);
	    ++LABEL_NUSES (new_label);
	  }

      /* Handle casesi dispatch insns.  */
      if ((tmp = single_set (insn)) != NULL
	  && SET_DEST (tmp) == pc_rtx
	  && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
	  && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF
	  && label_ref_label (XEXP (SET_SRC (tmp), 2)) == old_label)
	{
	  XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (Pmode,
						       new_label);
	  --LABEL_NUSES (old_label);
	  ++LABEL_NUSES (new_label);
	}
    }
  else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
    {
      int i, n = ASM_OPERANDS_LABEL_LENGTH (tmp);
      rtx note;

      if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
	return false;
      rtx_code_label *new_label = block_label (new_bb);

      for (i = 0; i < n; ++i)
	{
	  rtx old_ref = ASM_OPERANDS_LABEL (tmp, i);
	  gcc_assert (GET_CODE (old_ref) == LABEL_REF);
	  if (XEXP (old_ref, 0) == old_label)
	    {
	      ASM_OPERANDS_LABEL (tmp, i)
		= gen_rtx_LABEL_REF (Pmode, new_label);
	      --LABEL_NUSES (old_label);
	      ++LABEL_NUSES (new_label);
	    }
	}

      if (JUMP_LABEL (insn) == old_label)
	{
	  JUMP_LABEL (insn) = new_label;
	  note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
	  if (note)
	    remove_note (insn, note);
	}
      else
	{
	  note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
	  if (note)
	    remove_note (insn, note);
	  if (JUMP_LABEL (insn) != new_label
	      && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
	    add_reg_note (insn, REG_LABEL_TARGET, new_label);
	}
      while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
	     != NULL_RTX)
	XEXP (note, 0) = new_label;
    }
  else
    {
      /* ?? We may play the games with moving the named labels from
	 one basic block to the other in case only one computed_jump is
	 available.  */
      if (computed_jump_p (insn)
	  /* A return instruction can't be redirected.  */
	  || returnjump_p (insn))
	return false;

      if (!currently_expanding_to_rtl || JUMP_LABEL (insn) == old_label)
	{
	  /* If the insn doesn't go where we think, we're confused.  */
	  gcc_assert (JUMP_LABEL (insn) == old_label);

	  /* If the substitution doesn't succeed, die.  This can happen
	     if the back end emitted unrecognizable instructions or if
	     target is exit block on some arches.  Or for crossing
	     jumps.  */
	  if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
			      block_label (new_bb), 0))
	    {
	      gcc_assert (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
			  || CROSSING_JUMP_P (insn));
	      return false;
	    }
	}
    }
  return true;
}

/* Redirect edge E representing the branch of an (un)conditional jump or
   tablejump to TARGET.  Return the redirected edge, or NULL on failure.  */
static edge
redirect_branch_edge (edge e, basic_block target)
{
  rtx_insn *old_label = BB_HEAD (e->dest);
  basic_block src = e->src;
  rtx_insn *insn = BB_END (src);

  /* We can only redirect non-fallthru edges of jump insn.  */
  if (e->flags & EDGE_FALLTHRU)
    return NULL;
  else if (!JUMP_P (insn) && !currently_expanding_to_rtl)
    return NULL;

  if (!currently_expanding_to_rtl)
    {
      if (!patch_jump_insn (as_a <rtx_jump_insn *> (insn), old_label, target))
	return NULL;
    }
  else
    /* When expanding this BB might actually contain multiple
       jumps (i.e. not yet split by find_many_sub_basic_blocks).
       Redirect all of those that match our label.  */
    FOR_BB_INSNS (src, insn)
      if (JUMP_P (insn) && !patch_jump_insn (as_a <rtx_jump_insn *> (insn),
					     old_label, target))
	return NULL;

  if (dump_file)
    fprintf (dump_file, "Edge %i->%i redirected to %i\n",
	     e->src->index, e->dest->index, target->index);

  if (e->dest != target)
    e = redirect_edge_succ_nodup (e, target);

  return e;
}

/* Called when edge E has been redirected to a new destination,
   in order to update the region crossing flag on the edge and
   jump.  */

static void
fixup_partition_crossing (edge e)
{
  if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return;
  /* If we redirected an existing edge, it may already be marked
     crossing, even though the new src is missing a reg crossing note.
     But make sure reg crossing note doesn't already exist before
     inserting.  */
  if (BB_PARTITION (e->src) != BB_PARTITION (e->dest))
    {
      e->flags |= EDGE_CROSSING;
      if (JUMP_P (BB_END (e->src)))
	CROSSING_JUMP_P (BB_END (e->src)) = 1;
    }
  else if (BB_PARTITION (e->src) == BB_PARTITION (e->dest))
    {
      e->flags &= ~EDGE_CROSSING;
      /* Remove the section crossing note from jump at end of
	 src if it exists, and if no other successors are
	 still crossing.  */
      if (JUMP_P (BB_END (e->src)) && CROSSING_JUMP_P (BB_END (e->src)))
	{
	  bool has_crossing_succ = false;
	  edge e2;
	  edge_iterator ei;
	  FOR_EACH_EDGE (e2, ei, e->src->succs)
	    {
	      has_crossing_succ |= (e2->flags & EDGE_CROSSING);
	      if (has_crossing_succ)
		break;
	    }
	  if (!has_crossing_succ)
	    CROSSING_JUMP_P (BB_END (e->src)) = 0;
	}
    }
}

/* Called when block BB has been reassigned to the cold partition,
   because it is now dominated by another cold block,
   to ensure that the region crossing attributes are updated.  */

static void
fixup_new_cold_bb (basic_block bb)
{
  edge e;
  edge_iterator ei;

  /* This is called when a hot bb is found to now be dominated
     by a cold bb and therefore needs to become cold.  Therefore,
     its preds will no longer be region crossing.  Any non-dominating
     preds that were previously hot would also have become cold
     in the caller for the same region.  Any preds that were previously
     region-crossing will be adjusted in fixup_partition_crossing.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      fixup_partition_crossing (e);
    }

  /* Possibly need to make bb's successor edges region crossing,
     or remove stale region crossing.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      /* We can't have fall-through edges across partition boundaries.
	 Note that force_nonfallthru will do any necessary partition
	 boundary fixup by calling fixup_partition_crossing itself.  */
      if ((e->flags & EDGE_FALLTHRU)
	  && BB_PARTITION (bb) != BB_PARTITION (e->dest)
	  && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
	force_nonfallthru (e);
      else
	fixup_partition_crossing (e);
    }
}

/* Attempt to change code to redirect edge E to TARGET.  Don't do that at
   the expense of adding new instructions or reordering basic blocks.

   The function can also be called with the edge destination equal to
   TARGET.  It should then try the simplifications and do nothing if none
   is possible.

   Return the edge representing the branch if the transformation succeeded.
   Return NULL on failure.
   We still return NULL if E already pointed to TARGET and we didn't
   manage to simplify the instruction stream.  */

static edge
rtl_redirect_edge_and_branch (edge e, basic_block target)
{
  edge ret;
  basic_block src = e->src;
  basic_block dest = e->dest;

  if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
    return NULL;

  if (dest == target)
    return e;

  if ((ret = try_redirect_by_replacing_jump (e, target, false)) != NULL)
    {
      df_set_bb_dirty (src);
      fixup_partition_crossing (ret);
      return ret;
    }

  ret = redirect_branch_edge (e, target);
  if (!ret)
    return NULL;

  df_set_bb_dirty (src);
  fixup_partition_crossing (ret);
  return ret;
}

/* Emit a barrier after BB, into the footer if we are in CFGLAYOUT mode.  */

void
emit_barrier_after_bb (basic_block bb)
{
  rtx_barrier *barrier = emit_barrier_after (BB_END (bb));
  gcc_assert (current_ir_type () == IR_RTL_CFGRTL
	      || current_ir_type () == IR_RTL_CFGLAYOUT);
  if (current_ir_type () == IR_RTL_CFGLAYOUT)
    {
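      /* In cfglayout mode barriers do not live in the insn stream proper,
	 so take the barrier just emitted back out of the chain and append
	 it to BB's footer instead.  */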
      rtx_insn *insn = unlink_insn_chain (barrier, barrier);

      if (BB_FOOTER (bb))
	{
	  rtx_insn *footer_tail = BB_FOOTER (bb);

	  while (NEXT_INSN (footer_tail))
	    footer_tail = NEXT_INSN (footer_tail);
	  if (!BARRIER_P (footer_tail))
	    {
	      SET_NEXT_INSN (footer_tail) = insn;
	      SET_PREV_INSN (insn) = footer_tail;
	    }
	}
      else
	BB_FOOTER (bb) = insn;
    }
}

/* Like force_nonfallthru below, but additionally performs redirection.
   Used by redirect_edge_and_branch_force.  JUMP_LABEL is used only
   when redirecting to the EXIT_BLOCK, it is either ret_rtx or
   simple_return_rtx, indicating which kind of returnjump to create.
   It should be NULL otherwise.  */

basic_block
force_nonfallthru_and_redirect (edge e, basic_block target, rtx jump_label)
{
  basic_block jump_block, new_bb = NULL, src = e->src;
  rtx note;
  edge new_edge;
  int abnormal_edge_flags = 0;
  bool asm_goto_edge = false;
  int loc;

  /* In the case the last instruction is conditional jump to the next
     instruction, first redirect the jump itself and then continue
     by creating a basic block afterwards to redirect fallthru edge.  */
  if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
      && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && any_condjump_p (BB_END (e->src))
      && JUMP_LABEL (BB_END (e->src)) == BB_HEAD (e->dest))
    {
      rtx note;
      edge b = unchecked_make_edge (e->src, target, 0);
      bool redirected;

      redirected = redirect_jump (as_a <rtx_jump_insn *> (BB_END (e->src)),
				  block_label (target), 0);
      gcc_assert (redirected);

      note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX);
      if (note)
	{
	  int prob = XINT (note, 0);

	  b->probability = profile_probability::from_reg_br_prob_note (prob);
	  e->probability -= e->probability;
	}
    }

  if (e->flags & EDGE_ABNORMAL)
    {
      /* Irritating special case - fallthru edge to the same block as abnormal
	 edge.
	 We can't redirect abnormal edge, but we still can split the fallthru
	 one and create separate abnormal edge to original destination.
	 This allows bb-reorder to make such edge non-fallthru.  */
      gcc_assert (e->dest == target);
      abnormal_edge_flags = e->flags & ~EDGE_FALLTHRU;
      e->flags &= EDGE_FALLTHRU;
    }
  else
    {
      gcc_assert (e->flags & EDGE_FALLTHRU);
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	{
	  /* We can't redirect the entry block.  Create an empty block
	     at the start of the function which we use to add the new
	     jump.  */
	  edge tmp;
	  edge_iterator ei;
	  bool found = false;

	  basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL,
					       ENTRY_BLOCK_PTR_FOR_FN (cfun));
	  bb->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;

	  /* Make sure new block ends up in correct hot/cold section.  */
	  BB_COPY_PARTITION (bb, e->dest);

	  /* Change the existing edge's source to be the new block, and add
	     a new edge from the entry block to the new block.  */
	  e->src = bb;
	  for (ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
	       (tmp = ei_safe_edge (ei)); )
	    {
	      if (tmp == e)
		{
		  ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs->unordered_remove (ei.index);
		  found = true;
		  break;
		}
	      else
		ei_next (&ei);
	    }

	  gcc_assert (found);

	  vec_safe_push (bb->succs, e);
	  make_single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb,
				 EDGE_FALLTHRU);
	}
    }

  /* If e->src ends with asm goto, see if any of the ASM_OPERANDS_LABELs
     don't point to the target or fallthru label.  */
  if (JUMP_P (BB_END (e->src))
      && target != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && (e->flags & EDGE_FALLTHRU)
      && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
    {
      int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
      bool adjust_jump_target = false;

      for (i = 0; i < n; ++i)
	{
	  if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (e->dest))
	    {
	      LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))--;
	      XEXP (ASM_OPERANDS_LABEL (note, i), 0) = block_label (target);
	      LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))++;
	      adjust_jump_target = true;
	    }
	  if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (target))
	    asm_goto_edge = true;
	}
      if (adjust_jump_target)
	{
	  rtx_insn *insn = BB_END (e->src);
	  rtx note;
	  rtx_insn *old_label = BB_HEAD (e->dest);
	  rtx_insn *new_label = BB_HEAD (target);

	  if (JUMP_LABEL (insn) == old_label)
	    {
	      JUMP_LABEL (insn) = new_label;
	      note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
	      if (note)
		remove_note (insn, note);
	    }
	  else
	    {
	      note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
	      if (note)
		remove_note (insn, note);
	      if (JUMP_LABEL (insn) != new_label
		  && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
		add_reg_note (insn, REG_LABEL_TARGET, new_label);
	    }
	  while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
		 != NULL_RTX)
	    XEXP (note, 0) = new_label;
	}
    }

  if (EDGE_COUNT (e->src->succs) >= 2 || abnormal_edge_flags || asm_goto_edge)
    {
      rtx_insn *new_head;
      profile_count count = e->count ();
      profile_probability probability = e->probability;
      /* Create the new structures.  */

      /* If the old block ended with a tablejump, skip its table
	 by searching forward from there.  Otherwise start searching
	 forward from the last instruction of the old block.  */
      rtx_jump_table_data *table;
      if (tablejump_p (BB_END (e->src), NULL, &table))
	new_head = table;
      else
	new_head = BB_END (e->src);
      new_head = NEXT_INSN (new_head);

      jump_block = create_basic_block (new_head, NULL, e->src);
      jump_block->count = count;

      /* Make sure new block ends up in correct hot/cold section.  */
      BB_COPY_PARTITION (jump_block, e->src);

      /* Wire edge in.  */
      new_edge = make_edge (e->src, jump_block, EDGE_FALLTHRU);
      new_edge->probability = probability;

      /* Redirect old edge.  */
      redirect_edge_pred (e, jump_block);
      e->probability = profile_probability::always ();

      /* If e->src was previously region crossing, it no longer is
	 and the reg crossing note should be removed.  */
      fixup_partition_crossing (new_edge);

      /* If asm goto has any label refs to target's label,
	 add also edge from asm goto bb to target.  */
      if (asm_goto_edge)
	{
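	  /* The asm goto can transfer control either to the fallthru
	     block or to TARGET; lacking a better estimate, split the
	     count and probability evenly between the two edges.  */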
	  new_edge->probability = new_edge->probability.apply_scale (1, 2);
	  jump_block->count = jump_block->count.apply_scale (1, 2);
	  edge new_edge2 = make_edge (new_edge->src, target,
				      e->flags & ~EDGE_FALLTHRU);
	  new_edge2->probability = probability - new_edge->probability;
	}

      new_bb = jump_block;
    }
  else
    jump_block = e->src;

  loc = e->goto_locus;
  e->flags &= ~EDGE_FALLTHRU;
  if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      if (jump_label == ret_rtx)
	emit_jump_insn_after_setloc (targetm.gen_return (),
				     BB_END (jump_block), loc);
      else
	{
	  gcc_assert (jump_label == simple_return_rtx);
	  emit_jump_insn_after_setloc (targetm.gen_simple_return (),
				       BB_END (jump_block), loc);
	}
      set_return_jump_label (BB_END (jump_block));
    }
  else
    {
      rtx_code_label *label = block_label (target);
      emit_jump_insn_after_setloc (targetm.gen_jump (label),
				   BB_END (jump_block), loc);
      JUMP_LABEL (BB_END (jump_block)) = label;
      LABEL_NUSES (label)++;
    }

  /* We might be in cfg layout mode, and if so, the following routine will
     insert the barrier correctly.  */
  emit_barrier_after_bb (jump_block);
  redirect_edge_succ_nodup (e, target);

  if (abnormal_edge_flags)
    make_edge (src, target, abnormal_edge_flags);

  df_mark_solutions_dirty ();
  fixup_partition_crossing (e);
  return new_bb;
}

/* Edge E is assumed to be a fallthru edge.  Emit the needed jump instruction
   (and possibly create a new basic block) to make the edge non-fallthru.
   Return the newly created BB or NULL if none.  */

static basic_block
rtl_force_nonfallthru (edge e)
{
  return force_nonfallthru_and_redirect (e, e->dest, NULL_RTX);
}

/* Redirect edge even at the expense of creating new jump insn or
   basic block.  Return new basic block if created, NULL otherwise.
   Conversion must be possible.  */

static basic_block
rtl_redirect_edge_and_branch_force (edge e, basic_block target)
{
  if (redirect_edge_and_branch (e, target)
      || e->dest == target)
    return NULL;

  /* In case the edge redirection failed, try to force it to be non-fallthru
     and redirect newly created simplejump.  */
  df_set_bb_dirty (e->src);
  return force_nonfallthru_and_redirect (e, target, NULL_RTX);
}

/* The given edge should potentially be a fallthru edge.  If that is in
   fact true, delete the jump and barriers that are in the way.  */

static void
rtl_tidy_fallthru_edge (edge e)
{
  rtx_insn *q;
  basic_block b = e->src, c = b->next_bb;

  /* ??? In a late-running flow pass, other folks may have deleted basic
     blocks by nopping out blocks, leaving multiple BARRIERs between here
     and the target label.  They ought to be chastised and fixed.

     We can also wind up with a sequence of undeletable labels between
     one block and the next.

     So search through a sequence of barriers, labels, and notes for
     the head of block C and assert that we really do fall through.  */

  for (q = NEXT_INSN (BB_END (b)); q != BB_HEAD (c); q = NEXT_INSN (q))
    if (NONDEBUG_INSN_P (q))
      return;

  /* Remove what will soon cease being the jump insn from the source block.
     If block B consisted only of this single jump, turn it into a deleted
     note.  */
  q = BB_END (b);
  if (JUMP_P (q)
      && onlyjump_p (q)
      && (any_uncondjump_p (q)
	  || single_succ_p (b)))
    {
      rtx_insn *label;
      rtx_jump_table_data *table;

      if (tablejump_p (q, &label, &table))
	{
	  /* The label is likely mentioned in some instruction before
	     the tablejump and might not be DCEd, so turn it into
	     a note instead and move before the tablejump that is going to
	     be deleted.  */
	  const char *name = LABEL_NAME (label);
	  PUT_CODE (label, NOTE);
	  NOTE_KIND (label) = NOTE_INSN_DELETED_LABEL;
	  NOTE_DELETED_LABEL_NAME (label) = name;
	  reorder_insns (label, label, PREV_INSN (q));
	  delete_insn (table);
	}

      /* If this was a conditional jump, we need to also delete
	 the insn that set cc0.  */
      if (HAVE_cc0 && any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q)))
	q = PREV_INSN (q);

      q = PREV_INSN (q);
    }
  /* Unconditional jumps with side-effects (i.e. which we can't just delete
     together with the barrier) should never have a fallthru edge.  */
  else if (JUMP_P (q) && any_uncondjump_p (q))
    return;

  /* Selectively unlink the sequence.  */
  if (q != PREV_INSN (BB_HEAD (c)))
    delete_insn_chain (NEXT_INSN (q), PREV_INSN (BB_HEAD (c)), false);

  e->flags |= EDGE_FALLTHRU;
}
1807
1808 /* Should move basic block BB after basic block AFTER. NIY. */
1809
1810 static bool
rtl_move_block_after(basic_block bb ATTRIBUTE_UNUSED,basic_block after ATTRIBUTE_UNUSED)1811 rtl_move_block_after (basic_block bb ATTRIBUTE_UNUSED,
1812 basic_block after ATTRIBUTE_UNUSED)
1813 {
1814 return false;
1815 }

/* Locate the last bb in the same partition as START_BB.  */

static basic_block
last_bb_in_partition (basic_block start_bb)
{
  basic_block bb;
  FOR_BB_BETWEEN (bb, start_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    {
      if (BB_PARTITION (start_bb) != BB_PARTITION (bb->next_bb))
	return bb;
    }
  /* Return bb before the exit block.  */
  return bb->prev_bb;
}

/* Split a (typically critical) edge.  Return the new block.
   The edge must not be abnormal.

   ??? The code generally expects to be called on critical edges.
   The case of a block ending in an unconditional jump to a
   block with multiple predecessors is not handled optimally.  */

static basic_block
rtl_split_edge (edge edge_in)
{
  basic_block bb, new_bb;
  rtx_insn *before;

  /* Abnormal edges cannot be split.  */
  gcc_assert (!(edge_in->flags & EDGE_ABNORMAL));

  /* We are going to place the new block in front of edge destination.
     Avoid existence of fallthru predecessors.  */
  if ((edge_in->flags & EDGE_FALLTHRU) == 0)
    {
      edge e = find_fallthru_edge (edge_in->dest->preds);

      if (e)
	force_nonfallthru (e);
    }

  /* Create the basic block note.  */
  if (edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
    before = BB_HEAD (edge_in->dest);
  else
    before = NULL;

  /* If this is a fall through edge to the exit block, the blocks might
     not be adjacent, and the right place is after the source.  */
  if ((edge_in->flags & EDGE_FALLTHRU)
      && edge_in->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      before = NEXT_INSN (BB_END (edge_in->src));
      bb = create_basic_block (before, NULL, edge_in->src);
      BB_COPY_PARTITION (bb, edge_in->src);
    }
  else
    {
      if (edge_in->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	{
	  bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
	  BB_COPY_PARTITION (bb, edge_in->dest);
	}
      else
	{
	  basic_block after = edge_in->dest->prev_bb;
	  /* If this is post-bb reordering, and the edge crosses a partition
	     boundary, the new block needs to be inserted in the bb chain
	     at the end of the src partition (since we put the new bb into
	     that partition, see below).  Otherwise we may end up creating
	     an extra partition crossing in the chain, which is illegal.
	     It can't go after the src, because src may have a fall-through
	     to a different block.  */
	  if (crtl->bb_reorder_complete
	      && (edge_in->flags & EDGE_CROSSING))
	    {
	      after = last_bb_in_partition (edge_in->src);
	      before = get_last_bb_insn (after);
	      /* The instruction following the last bb in partition should
		 be a barrier, since it cannot end in a fall-through.  */
	      gcc_checking_assert (BARRIER_P (before));
	      before = NEXT_INSN (before);
	    }
	  bb = create_basic_block (before, NULL, after);
	  /* Put the split bb into the src partition, to avoid creating
	     a situation where a cold bb dominates a hot bb, in the case
	     where src is cold and dest is hot.  The src will dominate
	     the new bb (whereas it might not have dominated dest).  */
	  BB_COPY_PARTITION (bb, edge_in->src);
	}
    }

  make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU);

  /* Can't allow a region crossing edge to be fallthrough.  */
  if (BB_PARTITION (bb) != BB_PARTITION (edge_in->dest)
      && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      new_bb = force_nonfallthru (single_succ_edge (bb));
      gcc_assert (!new_bb);
    }

  /* For non-fallthru edges, we must adjust the predecessor's
     jump instruction to target our new block.  */
  if ((edge_in->flags & EDGE_FALLTHRU) == 0)
    {
      edge redirected = redirect_edge_and_branch (edge_in, bb);
      gcc_assert (redirected);
    }
  else
    {
      if (edge_in->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
	{
	  /* For asm goto even splitting of fallthru edge might
	     need insn patching, as other labels might point to the
	     old label.  */
	  rtx_insn *last = BB_END (edge_in->src);
	  if (last
	      && JUMP_P (last)
	      && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
	      && (extract_asm_operands (PATTERN (last))
		  || JUMP_LABEL (last) == before)
	      && patch_jump_insn (last, before, bb))
	    df_set_bb_dirty (edge_in->src);
	}
      redirect_edge_succ (edge_in, bb);
    }

  return bb;
}
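
/* A minimal usage sketch (editorial illustration, not part of the
   original sources): a pass that wants to place code "on" a critical,
   non-abnormal edge E can split it and emit into the new block, which
   falls through to the old destination:

     if (!(e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
       {
	 basic_block old_dest = e->dest;
	 basic_block nb = split_edge (e);
	 gcc_checking_assert (single_succ (nb) == old_dest);
       }
*/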

/* Queue instructions for insertion on an edge between two basic blocks.
   The new instructions and basic blocks (if any) will not appear in the
   CFG until commit_edge_insertions is called.  */

void
insert_insn_on_edge (rtx pattern, edge e)
{
  /* We cannot insert instructions on an abnormal critical edge.
     It will be easier to find the culprit if we die now.  */
  gcc_assert (!((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)));

  if (e->insns.r == NULL_RTX)
    start_sequence ();
  else
    push_to_sequence (e->insns.r);

  emit_insn (pattern);

  e->insns.r = get_insns ();
  end_sequence ();
}
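
/* A minimal usage sketch (editorial illustration; DEST and SRC are
   hypothetical pseudo registers): queue a register copy on edge E and
   materialize everything queued once all edges have been processed.
   gen_move_insn is the generic move generator declared in expr.h:

     insert_insn_on_edge (gen_move_insn (dest, src), e);
     ...
     commit_edge_insertions ();
*/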

/* Update the CFG for the instructions queued on edge E.  */

void
commit_one_edge_insertion (edge e)
{
  rtx_insn *before = NULL, *after = NULL, *insns, *tmp, *last;
  basic_block bb;

  /* Pull the insns off the edge now since the edge might go away.  */
  insns = e->insns.r;
  e->insns.r = NULL;

  /* Figure out where to put these insns.  If the destination has
     one predecessor, insert there.  Except for the exit block.  */
  if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      bb = e->dest;

      /* Get the location correct wrt a code label, and "nice" wrt
	 a basic block note, and before everything else.  */
      tmp = BB_HEAD (bb);
      if (LABEL_P (tmp))
	tmp = NEXT_INSN (tmp);
      if (NOTE_INSN_BASIC_BLOCK_P (tmp))
	tmp = NEXT_INSN (tmp);
      if (tmp == BB_HEAD (bb))
	before = tmp;
      else if (tmp)
	after = PREV_INSN (tmp);
      else
	after = get_last_insn ();
    }

  /* If the source has one successor and the edge is not abnormal,
     insert there.  Except for the entry block.
     Don't do this if the predecessor ends in a jump other than an
     unconditional simple jump.  E.g. for an asm goto that points all
     its labels at the fallthru basic block, we can't insert instructions
     before the asm goto, as the asm goto can have various side effects,
     and can't emit instructions after the asm goto, as it must end
     the basic block.  */
  else if ((e->flags & EDGE_ABNORMAL) == 0
	   && single_succ_p (e->src)
	   && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
	   && (!JUMP_P (BB_END (e->src))
	       || simplejump_p (BB_END (e->src))))
    {
      bb = e->src;

      /* It is possible to have a non-simple jump here.  Consider a target
	 where some forms of unconditional jumps clobber a register.  This
	 happens on the fr30 for example.

	 We know this block has a single successor, so we can just emit
	 the queued insns before the jump.  */
      if (JUMP_P (BB_END (bb)))
	before = BB_END (bb);
      else
	{
	  /* We'd better be fallthru, or we've lost track of what's what.  */
	  gcc_assert (e->flags & EDGE_FALLTHRU);

	  after = BB_END (bb);
	}
    }

  /* Otherwise we must split the edge.  */
  else
    {
      bb = split_edge (e);

      /* If E crossed a partition boundary, we needed to make bb end in
	 a region-crossing jump, even though it was originally fallthru.  */
      if (JUMP_P (BB_END (bb)))
	before = BB_END (bb);
      else
	after = BB_END (bb);
    }

  /* Now that we've found the spot, do the insertion.  */
  if (before)
    {
      emit_insn_before_noloc (insns, before, bb);
      last = prev_nonnote_insn (before);
    }
  else
    last = emit_insn_after_noloc (insns, after, bb);

  if (returnjump_p (last))
    {
      /* ??? Remove all outgoing edges from BB and add one for EXIT.
	 This is not currently a problem because this only happens
	 for the (single) epilogue, which already has a fallthru edge
	 to EXIT.  */

      e = single_succ_edge (bb);
      gcc_assert (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
		  && single_succ_p (bb) && (e->flags & EDGE_FALLTHRU));

      e->flags &= ~EDGE_FALLTHRU;
      emit_barrier_after (last);

      if (before)
	delete_insn (before);
    }
  else
    gcc_assert (!JUMP_P (last));
}

/* Update the CFG for all queued instructions.  */

void
commit_edge_insertions (void)
{
  basic_block bb;

  /* Optimization passes that invoke this routine can cause hot blocks
     previously reached by both hot and cold blocks to become dominated
     only by cold blocks.  This will cause the verification below to fail,
     and lead to now-cold code in the hot section.  In some cases this
     may only be visible after newly unreachable blocks are deleted,
     which will be done by fixup_partitions.  */
  fixup_partitions ();

  checking_verify_flow_info ();

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
		  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->insns.r)
	  commit_one_edge_insertion (e);
    }
}


/* Print out RTL-specific basic block information (live information
   at start and end with TDF_DETAILS).  FLAGS are the TDF_* masks
   documented in dumpfile.h.  */

static void
rtl_dump_bb (FILE *outf, basic_block bb, int indent, dump_flags_t flags)
{
  char *s_indent;

  s_indent = (char *) alloca ((size_t) indent + 1);
  memset (s_indent, ' ', (size_t) indent);
  s_indent[indent] = '\0';

  if (df && (flags & TDF_DETAILS))
    {
      df_dump_top (bb, outf);
      putc ('\n', outf);
    }

  if (bb->index != ENTRY_BLOCK && bb->index != EXIT_BLOCK)
    {
      rtx_insn *last = BB_END (bb);
      if (last)
	last = NEXT_INSN (last);
      for (rtx_insn *insn = BB_HEAD (bb); insn != last; insn = NEXT_INSN (insn))
	{
	  if (flags & TDF_DETAILS)
	    df_dump_insn_top (insn, outf);
	  if (! (flags & TDF_SLIM))
	    print_rtl_single (outf, insn);
	  else
	    dump_insn_slim (outf, insn);
	  if (flags & TDF_DETAILS)
	    df_dump_insn_bottom (insn, outf);
	}
    }

  if (df && (flags & TDF_DETAILS))
    {
      df_dump_bottom (bb, outf);
      putc ('\n', outf);
    }
}

/* Like dump_function_to_file, but for RTL.  Print out dataflow information
   for the start of each basic block.  FLAGS are the TDF_* masks documented
   in dumpfile.h.  */

void
print_rtl_with_bb (FILE *outf, const rtx_insn *rtx_first, dump_flags_t flags)
{
  const rtx_insn *tmp_rtx;
  if (rtx_first == 0)
    fprintf (outf, "(nil)\n");
  else
    {
      enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
      int max_uid = get_max_uid ();
      basic_block *start = XCNEWVEC (basic_block, max_uid);
      basic_block *end = XCNEWVEC (basic_block, max_uid);
      enum bb_state *in_bb_p = XCNEWVEC (enum bb_state, max_uid);
      basic_block bb;

      /* After freeing the CFG, we still have BLOCK_FOR_INSN set on most
	 insns, but the CFG is not maintained so the basic block info
	 is not reliable.  Therefore it's omitted from the dumps.  */
      if (! (cfun->curr_properties & PROP_cfg))
	flags &= ~TDF_BLOCKS;

      if (df)
	df_dump_start (outf);

      if (flags & TDF_BLOCKS)
	{
	  FOR_EACH_BB_REVERSE_FN (bb, cfun)
	    {
	      rtx_insn *x;

	      start[INSN_UID (BB_HEAD (bb))] = bb;
	      end[INSN_UID (BB_END (bb))] = bb;
	      for (x = BB_HEAD (bb); x != NULL_RTX; x = NEXT_INSN (x))
		{
		  enum bb_state state = IN_MULTIPLE_BB;

		  if (in_bb_p[INSN_UID (x)] == NOT_IN_BB)
		    state = IN_ONE_BB;
		  in_bb_p[INSN_UID (x)] = state;

		  if (x == BB_END (bb))
		    break;
		}
	    }
	}

      for (tmp_rtx = rtx_first; tmp_rtx != NULL; tmp_rtx = NEXT_INSN (tmp_rtx))
	{
	  if (flags & TDF_BLOCKS)
	    {
	      bb = start[INSN_UID (tmp_rtx)];
	      if (bb != NULL)
		{
		  dump_bb_info (outf, bb, 0, dump_flags, true, false);
		  if (df && (flags & TDF_DETAILS))
		    df_dump_top (bb, outf);
		}

	      if (in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB
		  && !NOTE_P (tmp_rtx)
		  && !BARRIER_P (tmp_rtx))
		fprintf (outf, ";; Insn is not within a basic block\n");
	      else if (in_bb_p[INSN_UID (tmp_rtx)] == IN_MULTIPLE_BB)
		fprintf (outf, ";; Insn is in multiple basic blocks\n");
	    }

	  if (flags & TDF_DETAILS)
	    df_dump_insn_top (tmp_rtx, outf);
	  if (! (flags & TDF_SLIM))
	    print_rtl_single (outf, tmp_rtx);
	  else
	    dump_insn_slim (outf, tmp_rtx);
	  if (flags & TDF_DETAILS)
	    df_dump_insn_bottom (tmp_rtx, outf);

	  if (flags & TDF_BLOCKS)
	    {
	      bb = end[INSN_UID (tmp_rtx)];
	      if (bb != NULL)
		{
		  dump_bb_info (outf, bb, 0, dump_flags, false, true);
		  if (df && (flags & TDF_DETAILS))
		    df_dump_bottom (bb, outf);
		  putc ('\n', outf);
		}
	    }
	}

      free (start);
      free (end);
      free (in_bb_p);
    }
}

/* Update the branch probability of BB if a REG_BR_PROB is present.  */

void
update_br_prob_note (basic_block bb)
{
  rtx note;
  note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX);
  if (!JUMP_P (BB_END (bb)) || !BRANCH_EDGE (bb)->probability.initialized_p ())
    {
      if (note)
	{
	  rtx *note_link, this_rtx;

	  note_link = &REG_NOTES (BB_END (bb));
	  for (this_rtx = *note_link; this_rtx; this_rtx = XEXP (this_rtx, 1))
	    if (this_rtx == note)
	      {
		*note_link = XEXP (this_rtx, 1);
		break;
	      }
	}
      return;
    }
  if (!note
      || XINT (note, 0) == BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ())
    return;
  XINT (note, 0) = BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ();
}
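
/* A minimal sketch (editorial illustration, not part of the original
   sources) of the round trip this function keeps consistent: the
   REG_BR_PROB note on the final jump stores an integer encoding of the
   branch edge's profile_probability.

     rtx note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX);
     if (note)
       {
	 profile_probability p
	   = profile_probability::from_reg_br_prob_note (XINT (note, 0));
	 ...
       }
*/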

/* Get the last insn associated with block BB (that includes barriers and
   tablejumps after BB).  */
rtx_insn *
get_last_bb_insn (basic_block bb)
{
  rtx_jump_table_data *table;
  rtx_insn *tmp;
  rtx_insn *end = BB_END (bb);

  /* Include any jump table following the basic block.  */
  if (tablejump_p (end, NULL, &table))
    end = table;

  /* Include any barriers that may follow the basic block.  */
  tmp = next_nonnote_nondebug_insn_bb (end);
  while (tmp && BARRIER_P (tmp))
    {
      end = tmp;
      tmp = next_nonnote_nondebug_insn_bb (end);
    }

  return end;
}
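
/* A minimal usage sketch (editorial illustration): iterating over the
   whole insn "footprint" of BB, including a trailing jump table and any
   barriers, instead of stopping at BB_END:

     rtx_insn *last = get_last_bb_insn (bb);
     for (rtx_insn *insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
       {
	 ...
	 if (insn == last)
	   break;
       }
*/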

/* Add all BBs reachable from entry via hot paths into the SET.  */

void
find_bbs_reachable_by_hot_paths (hash_set<basic_block> *set)
{
  auto_vec<basic_block, 64> worklist;

  set->add (ENTRY_BLOCK_PTR_FOR_FN (cfun));
  worklist.safe_push (ENTRY_BLOCK_PTR_FOR_FN (cfun));

  while (worklist.length () > 0)
    {
      basic_block bb = worklist.pop ();
      edge_iterator ei;
      edge e;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (BB_PARTITION (e->dest) != BB_COLD_PARTITION
	    && !set->add (e->dest))
	  worklist.safe_push (e->dest);
    }
}

/* Sanity check partition hotness to ensure that basic blocks in
   the cold partition don't dominate basic blocks in the hot partition.
   If FLAG_ONLY is true, report violations as errors.  Otherwise
   re-mark the dominated blocks as cold, since this is run after
   cfg optimizations that may make hot blocks previously reached
   by both hot and cold blocks now only reachable along cold paths.  */

static vec<basic_block>
find_partition_fixes (bool flag_only)
{
  basic_block bb;
  vec<basic_block> bbs_in_cold_partition = vNULL;
  vec<basic_block> bbs_to_fix = vNULL;
  hash_set<basic_block> set;

  /* Callers check this.  */
  gcc_checking_assert (crtl->has_bb_partition);

  find_bbs_reachable_by_hot_paths (&set);

  FOR_EACH_BB_FN (bb, cfun)
    if (!set.contains (bb)
	&& BB_PARTITION (bb) != BB_COLD_PARTITION)
      {
	if (flag_only)
	  error ("non-cold basic block %d reachable only "
		 "by paths crossing the cold partition", bb->index);
	else
	  BB_SET_PARTITION (bb, BB_COLD_PARTITION);
	bbs_to_fix.safe_push (bb);
	bbs_in_cold_partition.safe_push (bb);
      }

  return bbs_to_fix;
}

/* Perform cleanup on the hot/cold bb partitioning after optimization
   passes that modify the cfg.  */

void
fixup_partitions (void)
{
  basic_block bb;

  if (!crtl->has_bb_partition)
    return;

  /* Delete any blocks that became unreachable and weren't
     already cleaned up, for example during edge forwarding
     and convert_jumps_to_returns.  This will expose more
     opportunities for fixing the partition boundaries here.
     Also, the calculation of the dominance graph during verification
     will assert if there are unreachable nodes.  */
  delete_unreachable_blocks ();

  /* If there are partitions, do a sanity check on them: A basic block in
     a cold partition cannot dominate a basic block in a hot partition.
     Fixup any that now violate this requirement, as a result of edge
     forwarding and unreachable block deletion.  */
  vec<basic_block> bbs_to_fix = find_partition_fixes (false);

  /* Do the partition fixup after all necessary blocks have been converted
     to cold, so that we update the region crossings in the minimum number
     of places; this can require forcing edges to be non-fallthru.  */
  while (! bbs_to_fix.is_empty ())
    {
      bb = bbs_to_fix.pop ();
      fixup_new_cold_bb (bb);
    }
}

/* Verify, in the basic block chain, that there is at most one switch
   between hot/cold partitions.  This condition will not be true until
   after reorder_basic_blocks is called.  */

static int
verify_hot_cold_block_grouping (void)
{
  basic_block bb;
  int err = 0;
  bool switched_sections = false;
  int current_partition = BB_UNPARTITIONED;

  /* Even after bb reordering is complete, we go into cfglayout mode
     again (in compgoto).  Ensure we don't call this before going back
     into linearized RTL when any layout fixes would have been committed.  */
  if (!crtl->bb_reorder_complete
      || current_ir_type () != IR_RTL_CFGRTL)
    return err;

  FOR_EACH_BB_FN (bb, cfun)
    {
      if (current_partition != BB_UNPARTITIONED
	  && BB_PARTITION (bb) != current_partition)
	{
	  if (switched_sections)
	    {
	      error ("multiple hot/cold transitions found (bb %i)",
		     bb->index);
	      err = 1;
	    }
	  else
	    switched_sections = true;

	  if (!crtl->has_bb_partition)
	    error ("partition found but function partition flag not set");
	}
      current_partition = BB_PARTITION (bb);
    }

  return err;
}


/* Perform several checks on the edges out of each block, such as
   the consistency of the branch probabilities, the correctness
   of hot/cold partition crossing edges, and the number of expected
   successor edges.  Also verify that the dominance relationship
   between hot/cold blocks is sane.  */

static int
rtl_verify_edges (void)
{
  int err = 0;
  basic_block bb;

  FOR_EACH_BB_REVERSE_FN (bb, cfun)
    {
      int n_fallthru = 0, n_branch = 0, n_abnormal_call = 0, n_sibcall = 0;
      int n_eh = 0, n_abnormal = 0;
      edge e, fallthru = NULL;
      edge_iterator ei;
      rtx note;
      bool has_crossing_edge = false;

      if (JUMP_P (BB_END (bb))
	  && (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX))
	  && EDGE_COUNT (bb->succs) >= 2
	  && any_condjump_p (BB_END (bb)))
	{
	  if (!BRANCH_EDGE (bb)->probability.initialized_p ())
	    {
	      if (profile_status_for_fn (cfun) != PROFILE_ABSENT)
		{
		  error ("verify_flow_info: "
			 "REG_BR_PROB is set but cfg probability is not");
		  err = 1;
		}
	    }
	  else if (XINT (note, 0)
		   != BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ()
		   && profile_status_for_fn (cfun) != PROFILE_ABSENT)
	    {
	      error ("verify_flow_info: REG_BR_PROB does not match cfg %i %i",
		     XINT (note, 0),
		     BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ());
	      err = 1;
	    }
	}

      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  bool is_crossing;

	  if (e->flags & EDGE_FALLTHRU)
	    n_fallthru++, fallthru = e;

	  is_crossing = (BB_PARTITION (e->src) != BB_PARTITION (e->dest)
			 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
			 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun));
	  has_crossing_edge |= is_crossing;
	  if (e->flags & EDGE_CROSSING)
	    {
	      if (!is_crossing)
		{
		  error ("EDGE_CROSSING incorrectly set across same section");
		  err = 1;
		}
	      if (e->flags & EDGE_FALLTHRU)
		{
		  error ("fallthru edge crosses section boundary in bb %i",
			 e->src->index);
		  err = 1;
		}
	      if (e->flags & EDGE_EH)
		{
		  error ("EH edge crosses section boundary in bb %i",
			 e->src->index);
		  err = 1;
		}
	      if (JUMP_P (BB_END (bb)) && !CROSSING_JUMP_P (BB_END (bb)))
		{
		  error ("No region crossing jump at section boundary in bb %i",
			 bb->index);
		  err = 1;
		}
	    }
	  else if (is_crossing)
	    {
	      error ("EDGE_CROSSING missing across section boundary");
	      err = 1;
	    }

	  if ((e->flags & ~(EDGE_DFS_BACK
			    | EDGE_CAN_FALLTHRU
			    | EDGE_IRREDUCIBLE_LOOP
			    | EDGE_LOOP_EXIT
			    | EDGE_CROSSING
			    | EDGE_PRESERVE)) == 0)
	    n_branch++;

	  if (e->flags & EDGE_ABNORMAL_CALL)
	    n_abnormal_call++;

	  if (e->flags & EDGE_SIBCALL)
	    n_sibcall++;

	  if (e->flags & EDGE_EH)
	    n_eh++;

	  if (e->flags & EDGE_ABNORMAL)
	    n_abnormal++;
	}

      if (!has_crossing_edge
	  && JUMP_P (BB_END (bb))
	  && CROSSING_JUMP_P (BB_END (bb)))
	{
	  print_rtl_with_bb (stderr, get_insns (), TDF_BLOCKS | TDF_DETAILS);
	  error ("Region crossing jump across same section in bb %i",
		 bb->index);
	  err = 1;
	}

      if (n_eh && !find_reg_note (BB_END (bb), REG_EH_REGION, NULL_RTX))
	{
	  error ("missing REG_EH_REGION note at the end of bb %i", bb->index);
	  err = 1;
	}
      if (n_eh > 1)
	{
	  error ("too many exception handling edges in bb %i", bb->index);
	  err = 1;
	}
      if (n_branch
	  && (!JUMP_P (BB_END (bb))
	      || (n_branch > 1 && (any_uncondjump_p (BB_END (bb))
				   || any_condjump_p (BB_END (bb))))))
	{
	  error ("too many outgoing branch edges from bb %i", bb->index);
	  err = 1;
	}
      if (n_fallthru && any_uncondjump_p (BB_END (bb)))
	{
	  error ("fallthru edge after unconditional jump in bb %i", bb->index);
	  err = 1;
	}
      if (n_branch != 1 && any_uncondjump_p (BB_END (bb)))
	{
	  error ("wrong number of branch edges after unconditional jump"
		 " in bb %i", bb->index);
	  err = 1;
	}
      if (n_branch != 1 && any_condjump_p (BB_END (bb))
	  && JUMP_LABEL (BB_END (bb)) != BB_HEAD (fallthru->dest))
	{
	  error ("wrong number of branch edges after conditional jump"
		 " in bb %i", bb->index);
	  err = 1;
	}
      if (n_abnormal_call && !CALL_P (BB_END (bb)))
	{
	  error ("abnormal call edges for non-call insn in bb %i", bb->index);
	  err = 1;
	}
      if (n_sibcall && !CALL_P (BB_END (bb)))
	{
	  error ("sibcall edges for non-call insn in bb %i", bb->index);
	  err = 1;
	}
      if (n_abnormal > n_eh
	  && !(CALL_P (BB_END (bb))
	       && n_abnormal == n_abnormal_call + n_sibcall)
	  && (!JUMP_P (BB_END (bb))
	      || any_condjump_p (BB_END (bb))
	      || any_uncondjump_p (BB_END (bb))))
	{
	  error ("abnormal edges for no purpose in bb %i", bb->index);
	  err = 1;
	}
    }

  /* If there are partitions, do a sanity check on them: A basic block in
     a cold partition cannot dominate a basic block in a hot partition.  */
  if (crtl->has_bb_partition && !err
      && current_ir_type () == IR_RTL_CFGLAYOUT)
    {
      vec<basic_block> bbs_to_fix = find_partition_fixes (true);
      err = !bbs_to_fix.is_empty ();
    }

  /* Clean up.  */
  return err;
}

/* Checks on the instructions within blocks.  Currently checks that each
   block starts with a basic block note, and that basic block notes and
   control flow jumps are not found in the middle of the block.  */

static int
rtl_verify_bb_insns (void)
{
  rtx_insn *x;
  int err = 0;
  basic_block bb;

  FOR_EACH_BB_REVERSE_FN (bb, cfun)
    {
      /* Now check the header of the basic block.  It ought to contain
	 an optional CODE_LABEL followed by NOTE_BASIC_BLOCK.  */
      x = BB_HEAD (bb);
      if (LABEL_P (x))
	{
	  if (BB_END (bb) == x)
	    {
	      error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
		     bb->index);
	      err = 1;
	    }

	  x = NEXT_INSN (x);
	}

      if (!NOTE_INSN_BASIC_BLOCK_P (x) || NOTE_BASIC_BLOCK (x) != bb)
	{
	  error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
		 bb->index);
	  err = 1;
	}

      if (BB_END (bb) == x)
	/* Do checks for empty blocks here.  */
	;
      else
	for (x = NEXT_INSN (x); x; x = NEXT_INSN (x))
	  {
	    if (NOTE_INSN_BASIC_BLOCK_P (x))
	      {
		error ("NOTE_INSN_BASIC_BLOCK %d in middle of basic block %d",
		       INSN_UID (x), bb->index);
		err = 1;
	      }

	    if (x == BB_END (bb))
	      break;

	    if (control_flow_insn_p (x))
	      {
		error ("in basic block %d:", bb->index);
		fatal_insn ("flow control insn inside a basic block", x);
	      }
	  }
    }

  /* Clean up.  */
  return err;
}

/* Verify that block pointers for instructions in basic blocks, headers and
   footers are set appropriately.  */

static int
rtl_verify_bb_pointers (void)
{
  int err = 0;
  basic_block bb;

  /* Check the general integrity of the basic blocks.  */
  FOR_EACH_BB_REVERSE_FN (bb, cfun)
    {
      rtx_insn *insn;

      if (!(bb->flags & BB_RTL))
	{
	  error ("BB_RTL flag not set for block %d", bb->index);
	  err = 1;
	}

      FOR_BB_INSNS (bb, insn)
	if (BLOCK_FOR_INSN (insn) != bb)
	  {
	    error ("insn %d basic block pointer is %d, should be %d",
		   INSN_UID (insn),
		   BLOCK_FOR_INSN (insn) ? BLOCK_FOR_INSN (insn)->index : 0,
		   bb->index);
	    err = 1;
	  }

      for (insn = BB_HEADER (bb); insn; insn = NEXT_INSN (insn))
	if (!BARRIER_P (insn)
	    && BLOCK_FOR_INSN (insn) != NULL)
	  {
	    error ("insn %d in header of bb %d has non-NULL basic block",
		   INSN_UID (insn), bb->index);
	    err = 1;
	  }
      for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
	if (!BARRIER_P (insn)
	    && BLOCK_FOR_INSN (insn) != NULL)
	  {
	    error ("insn %d in footer of bb %d has non-NULL basic block",
		   INSN_UID (insn), bb->index);
	    err = 1;
	  }
    }

  /* Clean up.  */
  return err;
}

/* Verify the CFG and RTL consistency common for both underlying RTL and
   cfglayout RTL.

   Currently it does the following checks:

   - overlapping of basic blocks
   - insns with wrong BLOCK_FOR_INSN pointers
   - headers of basic blocks (the NOTE_INSN_BASIC_BLOCK note)
   - tails of basic blocks (ensure that boundary is necessary)
   - scans body of the basic block for JUMP_INSN, CODE_LABEL
     and NOTE_INSN_BASIC_BLOCK
   - verify that no fall_thru edge crosses hot/cold partition boundaries
   - verify that there are no pending RTL branch predictions
   - verify that hot blocks are not dominated by cold blocks

   In the future it can be extended to check a lot of other stuff as well
   (reachability of basic blocks, life information, etc. etc.).  */

static int
rtl_verify_flow_info_1 (void)
{
  int err = 0;

  err |= rtl_verify_bb_pointers ();

  err |= rtl_verify_bb_insns ();

  err |= rtl_verify_edges ();

  return err;
}

/* Walk the instruction chain and verify that bb head/end pointers
   are correct, and that instructions are in exactly one bb and have
   correct block pointers.  */

static int
rtl_verify_bb_insn_chain (void)
{
  basic_block bb;
  int err = 0;
  rtx_insn *x;
  rtx_insn *last_head = get_last_insn ();
  basic_block *bb_info;
  const int max_uid = get_max_uid ();

  bb_info = XCNEWVEC (basic_block, max_uid);

  FOR_EACH_BB_REVERSE_FN (bb, cfun)
    {
      rtx_insn *head = BB_HEAD (bb);
      rtx_insn *end = BB_END (bb);

      for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
	{
	  /* Verify the end of the basic block is in the INSN chain.  */
	  if (x == end)
	    break;

	  /* And that the code outside of basic blocks has NULL bb field.  */
	  if (!BARRIER_P (x)
	      && BLOCK_FOR_INSN (x) != NULL)
	    {
	      error ("insn %d outside of basic blocks has non-NULL bb field",
		     INSN_UID (x));
	      err = 1;
	    }
	}

      if (!x)
	{
	  error ("end insn %d for block %d not found in the insn stream",
		 INSN_UID (end), bb->index);
	  err = 1;
	}

      /* Work backwards from the end to the head of the basic block
	 to verify the head is in the RTL chain.  */
      for (; x != NULL_RTX; x = PREV_INSN (x))
	{
	  /* While walking over the insn chain, verify insns appear
	     in only one basic block.  */
	  if (bb_info[INSN_UID (x)] != NULL)
	    {
	      error ("insn %d is in multiple basic blocks (%d and %d)",
		     INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index);
	      err = 1;
	    }

	  bb_info[INSN_UID (x)] = bb;

	  if (x == head)
	    break;
	}
      if (!x)
	{
	  error ("head insn %d for block %d not found in the insn stream",
		 INSN_UID (head), bb->index);
	  err = 1;
	}

      last_head = PREV_INSN (x);
    }

  for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
    {
      /* Check that the code before the first basic block has NULL
	 bb field.  */
      if (!BARRIER_P (x)
	  && BLOCK_FOR_INSN (x) != NULL)
	{
	  error ("insn %d outside of basic blocks has non-NULL bb field",
		 INSN_UID (x));
	  err = 1;
	}
    }
  free (bb_info);

  return err;
}

/* Verify that fallthru edges point to adjacent blocks in layout order and
   that barriers exist after non-fallthru blocks.  */

static int
rtl_verify_fallthru (void)
{
  basic_block bb;
  int err = 0;

  FOR_EACH_BB_REVERSE_FN (bb, cfun)
    {
      edge e;

      e = find_fallthru_edge (bb->succs);
      if (!e)
	{
	  rtx_insn *insn;

	  /* Ensure existence of barrier in BB with no fallthru edges.  */
	  for (insn = NEXT_INSN (BB_END (bb)); ; insn = NEXT_INSN (insn))
	    {
	      if (!insn || NOTE_INSN_BASIC_BLOCK_P (insn))
		{
		  error ("missing barrier after block %i", bb->index);
		  err = 1;
		  break;
		}
	      if (BARRIER_P (insn))
		break;
	    }
	}
      else if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
	       && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
	{
	  rtx_insn *insn;

	  if (e->src->next_bb != e->dest)
	    {
	      error
		("verify_flow_info: Incorrect blocks for fallthru %i->%i",
		 e->src->index, e->dest->index);
	      err = 1;
	    }
	  else
	    for (insn = NEXT_INSN (BB_END (e->src)); insn != BB_HEAD (e->dest);
		 insn = NEXT_INSN (insn))
	      if (BARRIER_P (insn) || NONDEBUG_INSN_P (insn))
		{
		  error ("verify_flow_info: Incorrect fallthru %i->%i",
			 e->src->index, e->dest->index);
		  fatal_insn ("wrong insn in the fallthru edge", insn);
		  err = 1;
		}
	}
    }

  return err;
}

/* Verify that blocks are laid out in consecutive order.  While walking the
   instructions, verify that all expected instructions are inside the basic
   blocks, and that all returns are followed by barriers.  */

static int
rtl_verify_bb_layout (void)
{
  basic_block bb;
  int err = 0;
  rtx_insn *x, *y;
  int num_bb_notes;
  rtx_insn * const rtx_first = get_insns ();
  basic_block last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun), curr_bb = NULL;

  num_bb_notes = 0;
  last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  for (x = rtx_first; x; x = NEXT_INSN (x))
    {
      if (NOTE_INSN_BASIC_BLOCK_P (x))
	{
	  bb = NOTE_BASIC_BLOCK (x);

	  num_bb_notes++;
	  if (bb != last_bb_seen->next_bb)
	    internal_error ("basic blocks not laid down consecutively");

	  curr_bb = last_bb_seen = bb;
	}

      if (!curr_bb)
	{
	  switch (GET_CODE (x))
	    {
	    case BARRIER:
	    case NOTE:
	      break;

	    case CODE_LABEL:
	      /* An ADDR_VEC is placed outside any basic block.  */
	      if (NEXT_INSN (x)
		  && JUMP_TABLE_DATA_P (NEXT_INSN (x)))
		x = NEXT_INSN (x);

	      /* But in any case, non-deletable labels can appear anywhere.  */
	      break;

	    default:
	      fatal_insn ("insn outside basic block", x);
	    }
	}

      if (JUMP_P (x)
	  && returnjump_p (x) && ! condjump_p (x)
	  && ! ((y = next_nonnote_nondebug_insn (x))
		&& BARRIER_P (y)))
	fatal_insn ("return not followed by barrier", x);

      if (curr_bb && x == BB_END (curr_bb))
	curr_bb = NULL;
    }

  if (num_bb_notes != n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS)
    internal_error
      ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
       num_bb_notes, n_basic_blocks_for_fn (cfun));

  return err;
}

/* Verify the CFG and RTL consistency common for both underlying RTL and
   cfglayout RTL, plus consistency checks specific to linearized RTL mode.

   Currently it does the following checks:
   - all checks of rtl_verify_flow_info_1
   - test head/end pointers
   - check that blocks are laid out in consecutive order
   - check that all insns are in the basic blocks
     (except the switch handling code, barriers and notes)
   - check that all returns are followed by barriers
   - check that all fallthru edges point to the adjacent blocks
   - verify that there is a single hot/cold partition boundary after bbro  */

static int
rtl_verify_flow_info (void)
{
  int err = 0;

  err |= rtl_verify_flow_info_1 ();

  err |= rtl_verify_bb_insn_chain ();

  err |= rtl_verify_fallthru ();

  err |= rtl_verify_bb_layout ();

  err |= verify_hot_cold_block_grouping ();

  return err;
}

/* Assume that the preceding pass has possibly eliminated jump instructions
   or converted the unconditional jumps.  Eliminate the edges from the CFG.
   Return true if any edges are eliminated.  */

bool
purge_dead_edges (basic_block bb)
{
  edge e;
  rtx_insn *insn = BB_END (bb);
  rtx note;
  bool purged = false;
  bool found;
  edge_iterator ei;

  if (DEBUG_INSN_P (insn) && insn != BB_HEAD (bb))
    do
      insn = PREV_INSN (insn);
    while ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb));

  /* If this instruction cannot trap, remove REG_EH_REGION notes.  */
  if (NONJUMP_INSN_P (insn)
      && (note = find_reg_note (insn, REG_EH_REGION, NULL)))
    {
      rtx eqnote;

      if (! may_trap_p (PATTERN (insn))
	  || ((eqnote = find_reg_equal_equiv_note (insn))
	      && ! may_trap_p (XEXP (eqnote, 0))))
	remove_note (insn, note);
    }

  /* Cleanup abnormal edges caused by exceptions or non-local gotos.  */
  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      bool remove = false;

      /* There are three types of edges we need to handle correctly here: EH
	 edges, abnormal call EH edges, and abnormal call non-EH edges.  The
	 latter can appear when nonlocal gotos are used.  */
      if (e->flags & EDGE_ABNORMAL_CALL)
	{
	  if (!CALL_P (insn))
	    remove = true;
	  else if (can_nonlocal_goto (insn))
	    ;
	  else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
	    ;
	  else if (flag_tm && find_reg_note (insn, REG_TM, NULL))
	    ;
	  else
	    remove = true;
	}
      else if (e->flags & EDGE_EH)
	remove = !can_throw_internal (insn);

      if (remove)
	{
	  remove_edge (e);
	  df_set_bb_dirty (bb);
	  purged = true;
	}
      else
	ei_next (&ei);
    }

  if (JUMP_P (insn))
    {
      rtx note;
      edge b, f;
      edge_iterator ei;

      /* We only care about conditional jumps and simplejumps.  */
      if (!any_condjump_p (insn)
	  && !returnjump_p (insn)
	  && !simplejump_p (insn))
	return purged;

      /* Branch probability/prediction notes are defined only for
	 condjumps.  We've possibly turned condjump into simplejump.  */
      if (simplejump_p (insn))
	{
	  note = find_reg_note (insn, REG_BR_PROB, NULL);
	  if (note)
	    remove_note (insn, note);
	  while ((note = find_reg_note (insn, REG_BR_PRED, NULL)))
	    remove_note (insn, note);
	}

      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
	{
	  /* Avoid letting abnormal flags leak from computed jumps turned
	     into simplejumps.  */

	  e->flags &= ~EDGE_ABNORMAL;

	  /* See if this edge is one we should keep.  */
	  if ((e->flags & EDGE_FALLTHRU) && any_condjump_p (insn))
	    /* A conditional jump can fall through into the next
	       block, so we should keep the edge.  */
	    {
	      ei_next (&ei);
	      continue;
	    }
	  else if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
		   && BB_HEAD (e->dest) == JUMP_LABEL (insn))
	    /* If the destination block is the target of the jump,
	       keep the edge.  */
	    {
	      ei_next (&ei);
	      continue;
	    }
	  else if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
		   && returnjump_p (insn))
	    /* If the destination block is the exit block, and this
	       instruction is a return, then keep the edge.  */
	    {
	      ei_next (&ei);
	      continue;
	    }
	  else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
	    /* Keep the edges that correspond to exceptions thrown by
	       this instruction and rematerialize the EDGE_ABNORMAL
	       flag we just cleared above.  */
	    {
	      e->flags |= EDGE_ABNORMAL;
	      ei_next (&ei);
	      continue;
	    }

	  /* We do not need this edge.  */
	  df_set_bb_dirty (bb);
	  purged = true;
	  remove_edge (e);
	}

      if (EDGE_COUNT (bb->succs) == 0 || !purged)
	return purged;

      if (dump_file)
	fprintf (dump_file, "Purged edges from bb %i\n", bb->index);

      if (!optimize)
	return purged;

      /* Redistribute probabilities.  */
      if (single_succ_p (bb))
	{
	  single_succ_edge (bb)->probability = profile_probability::always ();
	}
      else
	{
	  note = find_reg_note (insn, REG_BR_PROB, NULL);
	  if (!note)
	    return purged;

	  b = BRANCH_EDGE (bb);
	  f = FALLTHRU_EDGE (bb);
	  b->probability = profile_probability::from_reg_br_prob_note
			     (XINT (note, 0));
	  f->probability = b->probability.invert ();
	}

      return purged;
    }
  else if (CALL_P (insn) && SIBLING_CALL_P (insn))
    {
      /* First, there should not be any EH or ABCALL edges resulting
	 from non-local gotos and the like.  If there were, we shouldn't
	 have created the sibcall in the first place.  Second, there
	 should of course never have been a fallthru edge.  */
      gcc_assert (single_succ_p (bb));
      gcc_assert (single_succ_edge (bb)->flags
		  == (EDGE_SIBCALL | EDGE_ABNORMAL));

      return false;
    }

  /* If we don't see a jump insn, we don't know exactly why the block would
     have been broken at this point.  Look for a simple, non-fallthru edge,
     as these are only created by conditional branches.  If we find such an
     edge we know that there used to be a jump here and can then safely
     remove all non-fallthru edges.  */
  found = false;
  FOR_EACH_EDGE (e, ei, bb->succs)
    if (! (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)))
      {
	found = true;
	break;
      }

  if (!found)
    return purged;

  /* Remove all but the fake and fallthru edges.  The fake edge may be
     the only successor for this block in the case of noreturn
     calls.  */
  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (!(e->flags & (EDGE_FALLTHRU | EDGE_FAKE)))
	{
	  df_set_bb_dirty (bb);
	  remove_edge (e);
	  purged = true;
	}
      else
	ei_next (&ei);
    }

  gcc_assert (single_succ_p (bb));

  single_succ_edge (bb)->probability = profile_probability::always ();

  if (dump_file)
    fprintf (dump_file, "Purged non-fallthru edges from bb %i\n",
	     bb->index);
  return purged;
}
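
/* A minimal usage sketch (editorial illustration): a pass that has just
   simplified or deleted the control flow insn ending BB keeps the CFG in
   sync by purging the edges that no longer have a matching insn:

     if (purge_dead_edges (bb))
       ...  some successor edges of BB were removed  ...
*/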

/* Search all basic blocks for potentially dead edges and purge them.
   Return true if some edge has been eliminated.  */

bool
purge_all_dead_edges (void)
{
  bool purged = false;
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      bool purged_here = purge_dead_edges (bb);

      purged |= purged_here;
    }

  return purged;
}

/* This is used by a few passes that emit some instructions after abnormal
   calls, moving the basic block's end, while they in fact do want to emit
   them on the fallthru edge.  Look for abnormal call edges, find the call
   backward in the block, and insert the instructions on the edge instead.

   Similarly, handle instructions throwing exceptions internally.

   Return true when instructions have been found and inserted on edges.  */

bool
fixup_abnormal_edges (void)
{
  bool inserted = false;
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      edge e;
      edge_iterator ei;

      /* Look for cases we are interested in - calls or instructions causing
	 exceptions.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	if ((e->flags & EDGE_ABNORMAL_CALL)
	    || ((e->flags & (EDGE_ABNORMAL | EDGE_EH))
		== (EDGE_ABNORMAL | EDGE_EH)))
	  break;

      if (e && !CALL_P (BB_END (bb)) && !can_throw_internal (BB_END (bb)))
	{
	  rtx_insn *insn;

	  /* Get past the new insns generated.  Allow notes, as the insns
	     may be already deleted.  */
	  insn = BB_END (bb);
	  while ((NONJUMP_INSN_P (insn) || NOTE_P (insn))
		 && !can_throw_internal (insn)
		 && insn != BB_HEAD (bb))
	    insn = PREV_INSN (insn);

	  if (CALL_P (insn) || can_throw_internal (insn))
	    {
	      rtx_insn *stop, *next;

	      e = find_fallthru_edge (bb->succs);

	      stop = NEXT_INSN (BB_END (bb));
	      BB_END (bb) = insn;

	      for (insn = NEXT_INSN (insn); insn != stop; insn = next)
		{
		  next = NEXT_INSN (insn);
		  if (INSN_P (insn))
		    {
		      delete_insn (insn);

		      /* Sometimes there's still the return value USE.
			 If it's placed after a trapping call (i.e. that
			 call is the last insn anyway), we have no fallthru
			 edge.  Simply delete this use and don't try to insert
			 on the non-existent edge.
			 Similarly, sometimes a call that can throw is
			 followed in the source with __builtin_unreachable (),
			 meaning that there is UB if the call returns rather
			 than throws.  If there weren't any instructions
			 following such calls before, supposedly even the ones
			 we've deleted aren't significant and can be
			 removed.  */
		      if (e)
			{
			  /* We're not deleting it, we're moving it.  */
			  insn->set_undeleted ();
			  SET_PREV_INSN (insn) = NULL_RTX;
			  SET_NEXT_INSN (insn) = NULL_RTX;

			  insert_insn_on_edge (insn, e);
			  inserted = true;
			}
		    }
		  else if (!BARRIER_P (insn))
		    set_block_for_insn (insn, NULL);
		}
	    }

	  /* It may be that we don't find any trapping insn.  In this
	     case we discovered quite late that the insn that had been
	     marked as can_throw_internal in fact couldn't trap at all.
	     So we should in fact delete the EH edges out of the block.  */
	  else
	    purge_dead_edges (bb);
	}
    }

  return inserted;
}

/* Cut the insns from FIRST to LAST out of the insns stream.  */

rtx_insn *
unlink_insn_chain (rtx_insn *first, rtx_insn *last)
{
  rtx_insn *prevfirst = PREV_INSN (first);
  rtx_insn *nextlast = NEXT_INSN (last);

  SET_PREV_INSN (first) = NULL;
  SET_NEXT_INSN (last) = NULL;
  if (prevfirst)
    SET_NEXT_INSN (prevfirst) = nextlast;
  if (nextlast)
    SET_PREV_INSN (nextlast) = prevfirst;
  else
    set_last_insn (prevfirst);
  if (!prevfirst)
    set_first_insn (nextlast);
  return first;
}
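
/* A minimal sketch (editorial illustration) of the splice this performs:
   FIRST .. LAST are removed from the doubly-linked insn stream and
   returned as a detached, NULL-terminated chain, e.g. to be stashed in a
   basic block header or footer:

     rtx_insn *chain = unlink_insn_chain (first, last);
     gcc_checking_assert (PREV_INSN (chain) == NULL
			  && NEXT_INSN (last) == NULL);
*/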

/* Skip over inter-block insns occurring after BB which are typically
   associated with BB (e.g., barriers).  If there are any such insns,
   we return the last one.  Otherwise, we return the end of BB.  */

static rtx_insn *
skip_insns_after_block (basic_block bb)
{
  rtx_insn *insn, *last_insn, *next_head, *prev;

  next_head = NULL;
  if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
    next_head = BB_HEAD (bb->next_bb);

  for (last_insn = insn = BB_END (bb); (insn = NEXT_INSN (insn)) != 0; )
    {
      if (insn == next_head)
	break;

      switch (GET_CODE (insn))
	{
	case BARRIER:
	  last_insn = insn;
	  continue;

	case NOTE:
	  switch (NOTE_KIND (insn))
	    {
	    case NOTE_INSN_BLOCK_END:
	      gcc_unreachable ();
	      continue;
	    default:
	      continue;
	      break;
	    }
	  break;

	case CODE_LABEL:
	  if (NEXT_INSN (insn)
	      && JUMP_TABLE_DATA_P (NEXT_INSN (insn)))
	    {
	      insn = NEXT_INSN (insn);
	      last_insn = insn;
	      continue;
	    }
	  break;

	default:
	  break;
	}

      break;
    }

  /* It is possible to hit a contradictory sequence.  For instance:

     jump_insn
     NOTE_INSN_BLOCK_BEG
     barrier

     Where barrier belongs to jump_insn, but the note does not.  This can
     be created by removing the basic block originally following
     NOTE_INSN_BLOCK_BEG.  In such a case reorder the notes.  */

  for (insn = last_insn; insn != BB_END (bb); insn = prev)
    {
      prev = PREV_INSN (insn);
      if (NOTE_P (insn))
	switch (NOTE_KIND (insn))
	  {
	  case NOTE_INSN_BLOCK_END:
	    gcc_unreachable ();
	    break;
	  case NOTE_INSN_DELETED:
	  case NOTE_INSN_DELETED_LABEL:
	  case NOTE_INSN_DELETED_DEBUG_LABEL:
	    continue;
	  default:
	    reorder_insns (insn, insn, last_insn);
	  }
    }

  return last_insn;
}

/* Locate or create a label for a given basic block.  */

static rtx_insn *
label_for_bb (basic_block bb)
{
  rtx_insn *label = BB_HEAD (bb);

  if (!LABEL_P (label))
    {
      if (dump_file)
	fprintf (dump_file, "Emitting label for block %d\n", bb->index);

      label = block_label (bb);
    }

  return label;
}

/* Locate the effective beginning and end of the insn chain for each
   block, as defined by skip_insns_after_block above.  */

static void
record_effective_endpoints (void)
{
  rtx_insn *next_insn;
  basic_block bb;
  rtx_insn *insn;

  for (insn = get_insns ();
       insn
       && NOTE_P (insn)
       && NOTE_KIND (insn) != NOTE_INSN_BASIC_BLOCK;
       insn = NEXT_INSN (insn))
    continue;
  /* No basic blocks at all?  */
  gcc_assert (insn);

  if (PREV_INSN (insn))
    cfg_layout_function_header
      = unlink_insn_chain (get_insns (), PREV_INSN (insn));
  else
    cfg_layout_function_header = NULL;

  next_insn = get_insns ();
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *end;

      if (PREV_INSN (BB_HEAD (bb)) && next_insn != BB_HEAD (bb))
	BB_HEADER (bb) = unlink_insn_chain (next_insn,
					    PREV_INSN (BB_HEAD (bb)));
      end = skip_insns_after_block (bb);
      if (NEXT_INSN (BB_END (bb)) && BB_END (bb) != end)
	BB_FOOTER (bb) = unlink_insn_chain (NEXT_INSN (BB_END (bb)), end);
      next_insn = NEXT_INSN (BB_END (bb));
    }

  cfg_layout_function_footer = next_insn;
  if (cfg_layout_function_footer)
    cfg_layout_function_footer
      = unlink_insn_chain (cfg_layout_function_footer, get_last_insn ());
}

namespace {

const pass_data pass_data_into_cfg_layout_mode =
{
  RTL_PASS, /* type */
  "into_cfglayout", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_CFG, /* tv_id */
  0, /* properties_required */
  PROP_cfglayout, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_into_cfg_layout_mode : public rtl_opt_pass
{
public:
  pass_into_cfg_layout_mode (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_into_cfg_layout_mode, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      cfg_layout_initialize (0);
      return 0;
    }

}; // class pass_into_cfg_layout_mode

} // anon namespace

rtl_opt_pass *
make_pass_into_cfg_layout_mode (gcc::context *ctxt)
{
  return new pass_into_cfg_layout_mode (ctxt);
}

namespace {

const pass_data pass_data_outof_cfg_layout_mode =
{
  RTL_PASS, /* type */
  "outof_cfglayout", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_CFG, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  PROP_cfglayout, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_outof_cfg_layout_mode : public rtl_opt_pass
{
public:
  pass_outof_cfg_layout_mode (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_outof_cfg_layout_mode, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_outof_cfg_layout_mode

unsigned int
pass_outof_cfg_layout_mode::execute (function *fun)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, fun)
    if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (fun))
      bb->aux = bb->next_bb;

  cfg_layout_finalize ();

  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_outof_cfg_layout_mode (gcc::context *ctxt)
{
  return new pass_outof_cfg_layout_mode (ctxt);
}


/* Link the basic blocks in the correct order, compacting the basic
   block queue while at it.  If STAY_IN_CFGLAYOUT_MODE is false, this
   function also clears the basic block header and footer fields.

   This function is usually called after a pass (e.g. tracer) finishes
   some transformations while in cfglayout mode.  The required sequence
   of the basic blocks is in a linked list along the bb->aux field.
   This function re-links the basic block prev_bb and next_bb pointers
   accordingly, and it compacts and renumbers the blocks.

   FIXME: This currently works only for RTL, but the only RTL-specific
   bits are the STAY_IN_CFGLAYOUT_MODE bits.  The tracer pass was moved
   to GIMPLE a long time ago, but it doesn't relink the basic block
   chain.  It could do that (to give better initial RTL) if this function
   is made IR-agnostic (and moved to cfganal.c or cfg.c while at it).  */

void
relink_block_chain (bool stay_in_cfglayout_mode)
{
  basic_block bb, prev_bb;
  int index;

  /* Maybe dump the re-ordered sequence.  */
  if (dump_file)
    {
      fprintf (dump_file, "Reordered sequence:\n");
      for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, index =
	   NUM_FIXED_BLOCKS;
	   bb;
	   bb = (basic_block) bb->aux, index++)
	{
	  fprintf (dump_file, " %i ", index);
	  if (get_bb_original (bb))
	    fprintf (dump_file, "duplicate of %i ",
		     get_bb_original (bb)->index);
	  else if (forwarder_block_p (bb)
		   && !LABEL_P (BB_HEAD (bb)))
	    fprintf (dump_file, "compensation ");
	  else
	    fprintf (dump_file, "bb %i ", bb->index);
	}
    }

  /* Now reorder the blocks.  */
  prev_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
  bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
  for (; bb; prev_bb = bb, bb = (basic_block) bb->aux)
    {
      bb->prev_bb = prev_bb;
      prev_bb->next_bb = bb;
    }
  prev_bb->next_bb = EXIT_BLOCK_PTR_FOR_FN (cfun);
  EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb = prev_bb;

  /* Then, clean up the aux fields.  */
  FOR_ALL_BB_FN (bb, cfun)
    {
      bb->aux = NULL;
      if (!stay_in_cfglayout_mode)
	BB_HEADER (bb) = BB_FOOTER (bb) = NULL;
    }

  /* Maybe reset the original copy tables, they are not valid anymore
     when we renumber the basic blocks in compact_blocks.  If we are
     going out of cfglayout mode, don't re-allocate the tables.  */
  if (original_copy_tables_initialized_p ())
    free_original_copy_tables ();
  if (stay_in_cfglayout_mode)
    initialize_original_copy_tables ();

  /* Finally, put basic_block_info in the new order.  */
  compact_blocks ();
}
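
/* A minimal sketch (editorial illustration) of the bb->aux convention
   this function consumes: a cfglayout-mode pass threads the desired
   block order through the aux pointers and then commits it, e.g.

     basic_block bb;
     FOR_EACH_BB_FN (bb, cfun)
       if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
	 bb->aux = bb->next_bb;
     relink_block_chain (true);    true = stay in cfglayout mode
*/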
3692
3693
3694 /* Given a reorder chain, rearrange the code to match. */
3695
3696 static void
fixup_reorder_chain(void)3697 fixup_reorder_chain (void)
3698 {
3699 basic_block bb;
3700 rtx_insn *insn = NULL;
3701
3702 if (cfg_layout_function_header)
3703 {
3704 set_first_insn (cfg_layout_function_header);
3705 insn = cfg_layout_function_header;
3706 while (NEXT_INSN (insn))
3707 insn = NEXT_INSN (insn);
3708 }
3709
3710 /* First do the bulk reordering -- rechain the blocks without regard to
3711 the needed changes to jumps and labels. */
3712
3713 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb; bb = (basic_block)
3714 bb->aux)
3715 {
3716 if (BB_HEADER (bb))
3717 {
3718 if (insn)
3719 SET_NEXT_INSN (insn) = BB_HEADER (bb);
3720 else
3721 set_first_insn (BB_HEADER (bb));
3722 SET_PREV_INSN (BB_HEADER (bb)) = insn;
3723 insn = BB_HEADER (bb);
3724 while (NEXT_INSN (insn))
3725 insn = NEXT_INSN (insn);
3726 }
3727 if (insn)
3728 SET_NEXT_INSN (insn) = BB_HEAD (bb);
3729 else
3730 set_first_insn (BB_HEAD (bb));
3731 SET_PREV_INSN (BB_HEAD (bb)) = insn;
3732 insn = BB_END (bb);
3733 if (BB_FOOTER (bb))
3734 {
3735 SET_NEXT_INSN (insn) = BB_FOOTER (bb);
3736 SET_PREV_INSN (BB_FOOTER (bb)) = insn;
3737 while (NEXT_INSN (insn))
3738 insn = NEXT_INSN (insn);
3739 }
3740 }
3741
3742 SET_NEXT_INSN (insn) = cfg_layout_function_footer;
3743 if (cfg_layout_function_footer)
3744 SET_PREV_INSN (cfg_layout_function_footer) = insn;
3745
3746 while (NEXT_INSN (insn))
3747 insn = NEXT_INSN (insn);
3748
3749 set_last_insn (insn);
3750 if (flag_checking)
3751 verify_insn_chain ();
3752
3753 /* Now add jumps and labels as needed to match the blocks' new
3754 outgoing edges. */
3755
3756 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb ; bb = (basic_block)
3757 bb->aux)
3758 {
3759 edge e_fall, e_taken, e;
3760 rtx_insn *bb_end_insn;
3761 rtx ret_label = NULL_RTX;
3762 basic_block nb;
3763 edge_iterator ei;
3764
3765 if (EDGE_COUNT (bb->succs) == 0)
3766 continue;
3767
3768 /* Find the old fallthru edge, and another non-EH edge for
3769 a taken jump. */
3770 e_taken = e_fall = NULL;
3771
3772 FOR_EACH_EDGE (e, ei, bb->succs)
3773 if (e->flags & EDGE_FALLTHRU)
3774 e_fall = e;
3775 else if (! (e->flags & EDGE_EH))
3776 e_taken = e;
3777
3778 bb_end_insn = BB_END (bb);
3779 if (rtx_jump_insn *bb_end_jump = dyn_cast <rtx_jump_insn *> (bb_end_insn))
3780 {
3781 ret_label = JUMP_LABEL (bb_end_jump);
3782 if (any_condjump_p (bb_end_jump))
3783 {
3784 /* This might happen if the conditional jump has side
3785 effects and could therefore not be optimized away.
3786 Make the basic block end with a barrier in order
3787 to prevent rtl_verify_flow_info from complaining. */
3788 if (!e_fall)
3789 {
3790 gcc_assert (!onlyjump_p (bb_end_jump)
3791 || returnjump_p (bb_end_jump)
3792 || (e_taken->flags & EDGE_CROSSING));
3793 emit_barrier_after (bb_end_jump);
3794 continue;
3795 }
3796
3797 /* If the old fallthru is still next, nothing to do. */
3798 if (bb->aux == e_fall->dest
3799 || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3800 continue;
3801
3802 /* The degenerate case of a conditional jump jumping to the next
3803 instruction can happen for jumps with side effects. We need
3804 to construct a forwarder block and this will be done just
3805 fine by force_nonfallthru below. */
3806 if (!e_taken)
3807 ;
3808
3809 /* There is another special case: if *neither* block is next,
3810 such as happens at the very end of a function, then we'll
3811 need to add a new unconditional jump. Choose the taken
3812 edge based on known or assumed probability. */
3813 else if (bb->aux != e_taken->dest)
3814 {
3815 rtx note = find_reg_note (bb_end_jump, REG_BR_PROB, 0);
3816
3817 if (note
3818 && profile_probability::from_reg_br_prob_note
3819 (XINT (note, 0)) < profile_probability::even ()
3820 && invert_jump (bb_end_jump,
3821 (e_fall->dest
3822 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3823 ? NULL_RTX
3824 : label_for_bb (e_fall->dest)), 0))
3825 {
3826 e_fall->flags &= ~EDGE_FALLTHRU;
3827 gcc_checking_assert (could_fall_through
3828 (e_taken->src, e_taken->dest));
3829 e_taken->flags |= EDGE_FALLTHRU;
3830 update_br_prob_note (bb);
3831 e = e_fall, e_fall = e_taken, e_taken = e;
3832 }
3833 }
3834
3835 /* If the "jumping" edge is a crossing edge, and the fall
3836 through edge is non-crossing, leave things as they are. */
3837 else if ((e_taken->flags & EDGE_CROSSING)
3838 && !(e_fall->flags & EDGE_CROSSING))
3839 continue;
3840
3841 /* Otherwise we can try to invert the jump. This will
3842 basically never fail; however, keep up the pretense. */
3843 else if (invert_jump (bb_end_jump,
3844 (e_fall->dest
3845 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3846 ? NULL_RTX
3847 : label_for_bb (e_fall->dest)), 0))
3848 {
3849 e_fall->flags &= ~EDGE_FALLTHRU;
3850 gcc_checking_assert (could_fall_through
3851 (e_taken->src, e_taken->dest));
3852 e_taken->flags |= EDGE_FALLTHRU;
3853 update_br_prob_note (bb);
3854 if (LABEL_NUSES (ret_label) == 0
3855 && single_pred_p (e_taken->dest))
3856 delete_insn (as_a<rtx_insn *> (ret_label));
3857 continue;
3858 }
3859 }
3860 else if (extract_asm_operands (PATTERN (bb_end_insn)) != NULL)
3861 {
3862 /* If the old fallthru is still next or if
3863 asm goto doesn't have a fallthru (e.g. when followed by
3864 __builtin_unreachable ()), nothing to do. */
3865 if (! e_fall
3866 || bb->aux == e_fall->dest
3867 || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3868 continue;
3869
3870 /* Otherwise we'll have to use the fallthru fixup below. */
3871 }
3872 else
3873 {
3874 /* Otherwise we have some return, switch or computed
3875 jump. In the 99% case, there should not have been a
3876 fallthru edge. */
3877 gcc_assert (returnjump_p (bb_end_insn) || !e_fall);
3878 continue;
3879 }
3880 }
3881 else
3882 {
3883 /* No fallthru implies a noreturn function with EH edges, or
3884 something similarly bizarre. In any case, we don't need to
3885 do anything. */
3886 if (! e_fall)
3887 continue;
3888
3889 /* If the fallthru block is still next, nothing to do. */
3890 if (bb->aux == e_fall->dest)
3891 continue;
3892
3893 /* A fallthru to exit block. */
3894 if (e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3895 continue;
3896 }
3897
3898 /* We got here if we need to add a new jump insn.
3899 Note force_nonfallthru can delete E_FALL and thus we have to
3900 save E_FALL->src prior to the call to force_nonfallthru. */
3901 nb = force_nonfallthru_and_redirect (e_fall, e_fall->dest, ret_label);
3902 if (nb)
3903 {
3904 nb->aux = bb->aux;
3905 bb->aux = nb;
3906 /* Don't process this new block. */
3907 bb = nb;
3908 }
3909 }
3910
3911 relink_block_chain (/*stay_in_cfglayout_mode=*/false);
3912
3913 /* Annoying special case - jump around dead jumptables left in the code. */
3914 FOR_EACH_BB_FN (bb, cfun)
3915 {
3916 edge e = find_fallthru_edge (bb->succs);
3917
3918 if (e && !can_fallthru (e->src, e->dest))
3919 force_nonfallthru (e);
3920 }
3921
3922 /* Ensure that each edge's goto_locus is represented by some instruction
3923 with that locus in the RTL. */
3924 if (!optimize)
3925 FOR_EACH_BB_FN (bb, cfun)
3926 {
3927 edge e;
3928 edge_iterator ei;
3929
3930 FOR_EACH_EDGE (e, ei, bb->succs)
3931 if (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
3932 && !(e->flags & EDGE_ABNORMAL))
3933 {
3934 edge e2;
3935 edge_iterator ei2;
3936 basic_block dest, nb;
3937 rtx_insn *end;
3938
3939 insn = BB_END (e->src);
3940 end = PREV_INSN (BB_HEAD (e->src));
3941 while (insn != end
3942 && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
3943 insn = PREV_INSN (insn);
3944 if (insn != end
3945 && INSN_LOCATION (insn) == e->goto_locus)
3946 continue;
3947 if (simplejump_p (BB_END (e->src))
3948 && !INSN_HAS_LOCATION (BB_END (e->src)))
3949 {
3950 INSN_LOCATION (BB_END (e->src)) = e->goto_locus;
3951 continue;
3952 }
3953 dest = e->dest;
3954 if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3955 {
3956 /* Non-fallthru edges to the exit block cannot be split. */
3957 if (!(e->flags & EDGE_FALLTHRU))
3958 continue;
3959 }
3960 else
3961 {
3962 insn = BB_HEAD (dest);
3963 end = NEXT_INSN (BB_END (dest));
3964 while (insn != end && !NONDEBUG_INSN_P (insn))
3965 insn = NEXT_INSN (insn);
3966 if (insn != end && INSN_HAS_LOCATION (insn)
3967 && INSN_LOCATION (insn) == e->goto_locus)
3968 continue;
3969 }
3970 nb = split_edge (e);
3971 if (!INSN_P (BB_END (nb)))
3972 BB_END (nb) = emit_insn_after_noloc (gen_nop (), BB_END (nb),
3973 nb);
3974 INSN_LOCATION (BB_END (nb)) = e->goto_locus;
3975
3976 /* If there are other incoming edges to the destination block
3977 with the same goto locus, redirect them to the new block as
3978 well; this can prevent other such blocks from being created
3979 in subsequent iterations of the loop. */
3980 for (ei2 = ei_start (dest->preds); (e2 = ei_safe_edge (ei2)); )
3981 if (LOCATION_LOCUS (e2->goto_locus) != UNKNOWN_LOCATION
3982 && !(e2->flags & (EDGE_ABNORMAL | EDGE_FALLTHRU))
3983 && e->goto_locus == e2->goto_locus)
3984 redirect_edge_and_branch (e2, nb);
3985 else
3986 ei_next (&ei2);
3987 }
3988 }
3989 }
3990
3991 /* Perform sanity checks on the insn chain.
3992 1. Check that next/prev pointers are consistent in both the forward and
3993 reverse direction.
3994 2. Count insns in chain, going both directions, and check if equal.
3995 3. Check that get_last_insn () returns the actual end of chain. */
3996
3997 DEBUG_FUNCTION void
3998 verify_insn_chain (void)
3999 {
4000 rtx_insn *x, *prevx, *nextx;
4001 int insn_cnt1, insn_cnt2;
4002
4003 for (prevx = NULL, insn_cnt1 = 1, x = get_insns ();
4004 x != 0;
4005 prevx = x, insn_cnt1++, x = NEXT_INSN (x))
4006 gcc_assert (PREV_INSN (x) == prevx);
4007
4008 gcc_assert (prevx == get_last_insn ());
4009
4010 for (nextx = NULL, insn_cnt2 = 1, x = get_last_insn ();
4011 x != 0;
4012 nextx = x, insn_cnt2++, x = PREV_INSN (x))
4013 gcc_assert (NEXT_INSN (x) == nextx);
4014
4015 gcc_assert (insn_cnt1 == insn_cnt2);
4016 }
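/* In other words, for every insn X in the chain this checks the
   doubly-linked-list invariants:

     PREV_INSN (NEXT_INSN (x)) == x    when NEXT_INSN (x) is non-null
     NEXT_INSN (PREV_INSN (x)) == x    when PREV_INSN (x) is non-null

   and that walking NEXT_INSN from get_insns () ends exactly at
   get_last_insn ().  Callers normally guard it with flag_checking,
   as fixup_reorder_chain does above.  */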
4017
4018 /* If we have assembler epilogues, the block falling through to exit must
4019 be the last one in the reordered chain when we reach final. Ensure
4020 that this condition is met. */
4021 static void
4022 fixup_fallthru_exit_predecessor (void)
4023 {
4024 edge e;
4025 basic_block bb = NULL;
4026
4027 /* This transformation is not valid before reload, because we might
4028 separate a call from the instruction that copies the return
4029 value. */
4030 gcc_assert (reload_completed);
4031
4032 e = find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4033 if (e)
4034 bb = e->src;
4035
4036 if (bb && bb->aux)
4037 {
4038 basic_block c = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
4039
4040 /* If the very first block is the one with the fall-through exit
4041 edge, we have to split that block. */
4042 if (c == bb)
4043 {
4044 bb = split_block_after_labels (bb)->dest;
4045 bb->aux = c->aux;
4046 c->aux = bb;
4047 BB_FOOTER (bb) = BB_FOOTER (c);
4048 BB_FOOTER (c) = NULL;
4049 }
4050
4051 while (c->aux != bb)
4052 c = (basic_block) c->aux;
4053
4054 c->aux = bb->aux;
4055 while (c->aux)
4056 c = (basic_block) c->aux;
4057
4058 c->aux = bb;
4059 bb->aux = NULL;
4060 }
4061 }
4062
4063 /* If the exit block has more than one fallthru predecessor, force it
4064 to have only one. */
4065
4066 static void
4067 force_one_exit_fallthru (void)
4068 {
4069 edge e, predecessor = NULL;
4070 bool more = false;
4071 edge_iterator ei;
4072 basic_block forwarder, bb;
4073
4074 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
4075 if (e->flags & EDGE_FALLTHRU)
4076 {
4077 if (predecessor == NULL)
4078 predecessor = e;
4079 else
4080 {
4081 more = true;
4082 break;
4083 }
4084 }
4085
4086 if (!more)
4087 return;
4088
4089 /* Exit has several fallthru predecessors. Create a forwarder block for
4090 them. */
4091 forwarder = split_edge (predecessor);
4092 for (ei = ei_start (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4093 (e = ei_safe_edge (ei)); )
4094 {
4095 if (e->src == forwarder
4096 || !(e->flags & EDGE_FALLTHRU))
4097 ei_next (&ei);
4098 else
4099 redirect_edge_and_branch_force (e, forwarder);
4100 }
4101
4102 /* Fix up the chain of blocks -- make FORWARDER immediately precede the
4103 exit block. */
4104 FOR_EACH_BB_FN (bb, cfun)
4105 {
4106 if (bb->aux == NULL && bb != forwarder)
4107 {
4108 bb->aux = forwarder;
4109 break;
4110 }
4111 }
4112 }
4113
4114 /* Return true if it is possible to duplicate the basic block BB. */
4115
4116 static bool
4117 cfg_layout_can_duplicate_bb_p (const_basic_block bb)
4118 {
4119 /* Do not attempt to duplicate tablejumps, as we need to unshare
4120 the dispatch table. This is difficult to do, as the instructions
4121 computing the jump destination may be hoisted outside the basic block. */
4122 if (tablejump_p (BB_END (bb), NULL, NULL))
4123 return false;
4124
4125 /* Do not duplicate blocks containing insns that can't be copied. */
4126 if (targetm.cannot_copy_insn_p)
4127 {
4128 rtx_insn *insn = BB_HEAD (bb);
4129 while (1)
4130 {
4131 if (INSN_P (insn) && targetm.cannot_copy_insn_p (insn))
4132 return false;
4133 if (insn == BB_END (bb))
4134 break;
4135 insn = NEXT_INSN (insn);
4136 }
4137 }
4138
4139 return true;
4140 }
4141
4142 rtx_insn *
4143 duplicate_insn_chain (rtx_insn *from, rtx_insn *to)
4144 {
4145 rtx_insn *insn, *next, *copy;
4146 rtx_note *last;
4147
4148 /* Avoid updating the boundaries of the previous basic block. The
4149 note will be removed from the insn stream in fixup. */
4150 last = emit_note (NOTE_INSN_DELETED);
4151
4152 /* Create copy at the end of INSN chain. The chain will
4153 be reordered later. */
4154 for (insn = from; insn != NEXT_INSN (to); insn = NEXT_INSN (insn))
4155 {
4156 switch (GET_CODE (insn))
4157 {
4158 case DEBUG_INSN:
4159 /* Don't duplicate label debug insns. */
4160 if (DEBUG_BIND_INSN_P (insn)
4161 && TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL)
4162 break;
4163 /* FALLTHRU */
4164 case INSN:
4165 case CALL_INSN:
4166 case JUMP_INSN:
4167 copy = emit_copy_of_insn_after (insn, get_last_insn ());
4168 if (JUMP_P (insn) && JUMP_LABEL (insn) != NULL_RTX
4169 && ANY_RETURN_P (JUMP_LABEL (insn)))
4170 JUMP_LABEL (copy) = JUMP_LABEL (insn);
4171 maybe_copy_prologue_epilogue_insn (insn, copy);
4172 break;
4173
4174 case JUMP_TABLE_DATA:
4175 /* Avoid copying dispatch tables. We never duplicate
4176 tablejumps, so this can only be hit if the table was
4177 moved far from the original jump.
4178 Avoid copying following barrier as well if any
4179 (and debug insns in between). */
4180 for (next = NEXT_INSN (insn);
4181 next != NEXT_INSN (to);
4182 next = NEXT_INSN (next))
4183 if (!DEBUG_INSN_P (next))
4184 break;
4185 if (next != NEXT_INSN (to) && BARRIER_P (next))
4186 insn = next;
4187 break;
4188
4189 case CODE_LABEL:
4190 break;
4191
4192 case BARRIER:
4193 emit_barrier ();
4194 break;
4195
4196 case NOTE:
4197 switch (NOTE_KIND (insn))
4198 {
4199 /* In case the prologue is empty and the function contains a
4200 label in its first BB, we may want to copy the block. */
4201 case NOTE_INSN_PROLOGUE_END:
4202
4203 case NOTE_INSN_DELETED:
4204 case NOTE_INSN_DELETED_LABEL:
4205 case NOTE_INSN_DELETED_DEBUG_LABEL:
4206 /* No problem to strip these. */
4207 case NOTE_INSN_FUNCTION_BEG:
4208 /* There is always just a single entry to the function. */
4209 case NOTE_INSN_BASIC_BLOCK:
4210 /* We should only switch text sections once. */
4211 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
4212 break;
4213
4214 case NOTE_INSN_EPILOGUE_BEG:
4215 case NOTE_INSN_UPDATE_SJLJ_CONTEXT:
4216 emit_note_copy (as_a <rtx_note *> (insn));
4217 break;
4218
4219 default:
4220 /* All other notes should have already been eliminated. */
4221 gcc_unreachable ();
4222 }
4223 break;
4224 default:
4225 gcc_unreachable ();
4226 }
4227 }
4228 insn = NEXT_INSN (last);
4229 delete_insn (last);
4230 return insn;
4231 }
4232
4233 /* Create a duplicate of the basic block BB. */
4234
4235 static basic_block
4236 cfg_layout_duplicate_bb (basic_block bb, copy_bb_data *)
4237 {
4238 rtx_insn *insn;
4239 basic_block new_bb;
4240
4241 insn = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb));
4242 new_bb = create_basic_block (insn,
4243 insn ? get_last_insn () : NULL,
4244 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
4245
4246 BB_COPY_PARTITION (new_bb, bb);
4247 if (BB_HEADER (bb))
4248 {
4249 insn = BB_HEADER (bb);
4250 while (NEXT_INSN (insn))
4251 insn = NEXT_INSN (insn);
4252 insn = duplicate_insn_chain (BB_HEADER (bb), insn);
4253 if (insn)
4254 BB_HEADER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4255 }
4256
4257 if (BB_FOOTER (bb))
4258 {
4259 insn = BB_FOOTER (bb);
4260 while (NEXT_INSN (insn))
4261 insn = NEXT_INSN (insn);
4262 insn = duplicate_insn_chain (BB_FOOTER (bb), insn);
4263 if (insn)
4264 BB_FOOTER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4265 }
4266
4267 return new_bb;
4268 }
4269
4270
4271 /* Main entry point to this module - initialize the data structures for
4272 CFG layout changes.
4273
4274 FLAGS is a set of additional flags to pass to cleanup_cfg (). */
4275
4276 void
4277 cfg_layout_initialize (int flags)
4278 {
4279 rtx_insn_list *x;
4280 basic_block bb;
4281
4282 /* Once bb partitioning is complete, cfg layout mode should not be
4283 re-entered. Entering cfg layout mode may require fixups. For
4284 example, edge forwarding performed when optimizing the cfg
4285 layout could require moving a block from the hot to the cold
4286 section, which would create an illegal partitioning unless some
4287 manual fixup was performed. */
4288 gcc_assert (!crtl->bb_reorder_complete || !crtl->has_bb_partition);
4289
4290 initialize_original_copy_tables ();
4291
4292 cfg_layout_rtl_register_cfg_hooks ();
4293
4294 record_effective_endpoints ();
4295
4296 /* Make sure that the targets of non local gotos are marked. */
4297 for (x = nonlocal_goto_handler_labels; x; x = x->next ())
4298 {
4299 bb = BLOCK_FOR_INSN (x->insn ());
4300 bb->flags |= BB_NON_LOCAL_GOTO_TARGET;
4301 }
4302
4303 cleanup_cfg (CLEANUP_CFGLAYOUT | flags);
4304 }
4305
4306 /* Splits superblocks. */
4307 void
4308 break_superblocks (void)
4309 {
4310 bool need = false;
4311 basic_block bb;
4312
4313 auto_sbitmap superblocks (last_basic_block_for_fn (cfun));
4314 bitmap_clear (superblocks);
4315
4316 FOR_EACH_BB_FN (bb, cfun)
4317 if (bb->flags & BB_SUPERBLOCK)
4318 {
4319 bb->flags &= ~BB_SUPERBLOCK;
4320 bitmap_set_bit (superblocks, bb->index);
4321 need = true;
4322 }
4323
4324 if (need)
4325 {
4326 rebuild_jump_labels (get_insns ());
4327 find_many_sub_basic_blocks (superblocks);
4328 }
4329 }
4330
4331 /* Finalize the changes: reorder the insn list according to the sequence
4332 specified by the aux pointers, emit compensation code, rebuild the scope forest. */
4333
4334 void
4335 cfg_layout_finalize (void)
4336 {
4337 free_dominance_info (CDI_DOMINATORS);
4338 force_one_exit_fallthru ();
4339 rtl_register_cfg_hooks ();
4340 if (reload_completed && !targetm.have_epilogue ())
4341 fixup_fallthru_exit_predecessor ();
4342 fixup_reorder_chain ();
4343
4344 rebuild_jump_labels (get_insns ());
4345 delete_dead_jumptables ();
4346
4347 if (flag_checking)
4348 verify_insn_chain ();
4349 checking_verify_flow_info ();
4350 }
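/* Taken together, cfg_layout_initialize and cfg_layout_finalize bracket a
   pass's CFG surgery; a minimal sketch of the expected calling pattern,
   with the transformation in the middle left hypothetical:

     cfg_layout_initialize (0);
     ... redirect edges, duplicate or reorder blocks, optionally
         recording a new block order through the bb->aux chain ...
     cfg_layout_finalize ();  // re-linearizes the insn chain
*/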
4351
4352
4353 /* Same as split_block but update cfg_layout structures. */
4354
4355 static basic_block
4356 cfg_layout_split_block (basic_block bb, void *insnp)
4357 {
4358 rtx insn = (rtx) insnp;
4359 basic_block new_bb = rtl_split_block (bb, insn);
4360
4361 BB_FOOTER (new_bb) = BB_FOOTER (bb);
4362 BB_FOOTER (bb) = NULL;
4363
4364 return new_bb;
4365 }
4366
4367 /* Redirect Edge to DEST. */
4368 static edge
4369 cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
4370 {
4371 basic_block src = e->src;
4372 edge ret;
4373
4374 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
4375 return NULL;
4376
4377 if (e->dest == dest)
4378 return e;
4379
4380 if (e->flags & EDGE_CROSSING
4381 && BB_PARTITION (e->src) == BB_PARTITION (dest)
4382 && simplejump_p (BB_END (src)))
4383 {
4384 if (dump_file)
4385 fprintf (dump_file,
4386 "Removing crossing jump while redirecting edge form %i to %i\n",
4387 e->src->index, dest->index);
4388 delete_insn (BB_END (src));
4389 remove_barriers_from_footer (src);
4390 e->flags |= EDGE_FALLTHRU;
4391 }
4392
4393 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4394 && (ret = try_redirect_by_replacing_jump (e, dest, true)))
4395 {
4396 df_set_bb_dirty (src);
4397 return ret;
4398 }
4399
4400 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4401 && (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
4402 {
4403 if (dump_file)
4404 fprintf (dump_file, "Redirecting entry edge from bb %i to %i\n",
4405 e->src->index, dest->index);
4406
4407 df_set_bb_dirty (e->src);
4408 redirect_edge_succ (e, dest);
4409 return e;
4410 }
4411
4412 /* Redirect_edge_and_branch may decide to turn a branch into a fallthru
4413 edge in case the basic blocks appear to be in sequence. Avoid this
4414 transformation. */
4415
4416 if (e->flags & EDGE_FALLTHRU)
4417 {
4418 /* Redirect any branch edges unified with the fallthru one. */
4419 if (JUMP_P (BB_END (src))
4420 && label_is_jump_target_p (BB_HEAD (e->dest),
4421 BB_END (src)))
4422 {
4423 edge redirected;
4424
4425 if (dump_file)
4426 fprintf (dump_file, "Fallthru edge unified with branch "
4427 "%i->%i redirected to %i\n",
4428 e->src->index, e->dest->index, dest->index);
4429 e->flags &= ~EDGE_FALLTHRU;
4430 redirected = redirect_branch_edge (e, dest);
4431 gcc_assert (redirected);
4432 redirected->flags |= EDGE_FALLTHRU;
4433 df_set_bb_dirty (redirected->src);
4434 return redirected;
4435 }
4436 /* In case we are redirecting a fallthru edge to the branch edge
4437 of a conditional jump, remove it. */
4438 if (EDGE_COUNT (src->succs) == 2)
4439 {
4440 /* Find the edge that is different from E. */
4441 edge s = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e);
4442
4443 if (s->dest == dest
4444 && any_condjump_p (BB_END (src))
4445 && onlyjump_p (BB_END (src)))
4446 delete_insn (BB_END (src));
4447 }
4448 if (dump_file)
4449 fprintf (dump_file, "Redirecting fallthru edge %i->%i to %i\n",
4450 e->src->index, e->dest->index, dest->index);
4451 ret = redirect_edge_succ_nodup (e, dest);
4452 }
4453 else
4454 ret = redirect_branch_edge (e, dest);
4455
4456 if (!ret)
4457 return NULL;
4458
4459 fixup_partition_crossing (ret);
4460 /* We don't want simplejumps in the insn stream during cfglayout. */
4461 gcc_assert (!simplejump_p (BB_END (src)) || CROSSING_JUMP_P (BB_END (src)));
4462
4463 df_set_bb_dirty (src);
4464 return ret;
4465 }
4466
4467 /* Simple wrapper, as we can always redirect fallthru edges. */
4468 static basic_block
4469 cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest)
4470 {
4471 edge redirected = cfg_layout_redirect_edge_and_branch (e, dest);
4472
4473 gcc_assert (redirected);
4474 return NULL;
4475 }
4476
4477 /* Same as delete_basic_block but update cfg_layout structures. */
4478
4479 static void
4480 cfg_layout_delete_block (basic_block bb)
4481 {
4482 rtx_insn *insn, *next, *prev = PREV_INSN (BB_HEAD (bb)), *remaints;
4483 rtx_insn **to;
4484
4485 if (BB_HEADER (bb))
4486 {
4487 next = BB_HEAD (bb);
4488 if (prev)
4489 SET_NEXT_INSN (prev) = BB_HEADER (bb);
4490 else
4491 set_first_insn (BB_HEADER (bb));
4492 SET_PREV_INSN (BB_HEADER (bb)) = prev;
4493 insn = BB_HEADER (bb);
4494 while (NEXT_INSN (insn))
4495 insn = NEXT_INSN (insn);
4496 SET_NEXT_INSN (insn) = next;
4497 SET_PREV_INSN (next) = insn;
4498 }
4499 next = NEXT_INSN (BB_END (bb));
4500 if (BB_FOOTER (bb))
4501 {
4502 insn = BB_FOOTER (bb);
4503 while (insn)
4504 {
4505 if (BARRIER_P (insn))
4506 {
4507 if (PREV_INSN (insn))
4508 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
4509 else
4510 BB_FOOTER (bb) = NEXT_INSN (insn);
4511 if (NEXT_INSN (insn))
4512 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
4513 }
4514 if (LABEL_P (insn))
4515 break;
4516 insn = NEXT_INSN (insn);
4517 }
4518 if (BB_FOOTER (bb))
4519 {
4520 insn = BB_END (bb);
4521 SET_NEXT_INSN (insn) = BB_FOOTER (bb);
4522 SET_PREV_INSN (BB_FOOTER (bb)) = insn;
4523 while (NEXT_INSN (insn))
4524 insn = NEXT_INSN (insn);
4525 SET_NEXT_INSN (insn) = next;
4526 if (next)
4527 SET_PREV_INSN (next) = insn;
4528 else
4529 set_last_insn (insn);
4530 }
4531 }
4532 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4533 to = &BB_HEADER (bb->next_bb);
4534 else
4535 to = &cfg_layout_function_footer;
4536
4537 rtl_delete_block (bb);
4538
4539 if (prev)
4540 prev = NEXT_INSN (prev);
4541 else
4542 prev = get_insns ();
4543 if (next)
4544 next = PREV_INSN (next);
4545 else
4546 next = get_last_insn ();
4547
4548 if (next && NEXT_INSN (next) != prev)
4549 {
4550 remaints = unlink_insn_chain (prev, next);
4551 insn = remaints;
4552 while (NEXT_INSN (insn))
4553 insn = NEXT_INSN (insn);
4554 SET_NEXT_INSN (insn) = *to;
4555 if (*to)
4556 SET_PREV_INSN (*to) = insn;
4557 *to = remaints;
4558 }
4559 }
4560
4561 /* Return true when blocks A and B can be safely merged. */
4562
4563 static bool
4564 cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
4565 {
4566 /* If we are partitioning hot/cold basic blocks, we don't want to
4567 mess up unconditional or indirect jumps that cross between hot
4568 and cold sections.
4569
4570 Basic block partitioning may result in some jumps that appear to
4571 be optimizable (or blocks that appear to be mergeable), but which really
4572 must be left untouched (they are required to make it safely across
4573 partition boundaries). See the comments at the top of
4574 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
4575
4576 if (BB_PARTITION (a) != BB_PARTITION (b))
4577 return false;
4578
4579 /* Protect the loop latches. */
4580 if (current_loops && b->loop_father->latch == b)
4581 return false;
4582
4583 /* If we would end up moving B's instructions, make sure it doesn't fall
4584 through into the exit block, since we cannot recover from a fallthrough
4585 edge into the exit block occurring in the middle of a function. */
4586 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4587 {
4588 edge e = find_fallthru_edge (b->succs);
4589 if (e && e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4590 return false;
4591 }
4592
4593 /* There must be exactly one edge in between the blocks. */
4594 return (single_succ_p (a)
4595 && single_succ (a) == b
4596 && single_pred_p (b)
4597 && a != b
4598 /* Must be simple edge. */
4599 && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
4600 && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4601 && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
4602 /* If the jump insn has side effects, we can't kill the edge.
4603 When not optimizing, try_redirect_by_replacing_jump will
4604 not allow us to redirect an edge by replacing a table jump. */
4605 && (!JUMP_P (BB_END (a))
4606 || ((!optimize || reload_completed)
4607 ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
4608 }
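/* A typical caller checks the predicate above before merging; through the
   cfg hooks this is the can_merge_blocks_p / merge_blocks pair.  Sketch:

     if (cfg_layout_can_merge_blocks_p (a, b))
       cfg_layout_merge_blocks (a, b);  // B is absorbed into A
*/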
4609
4610 /* Merge block A and B. The blocks must be mergeable. */
4611
4612 static void
4613 cfg_layout_merge_blocks (basic_block a, basic_block b)
4614 {
4615 bool forwarder_p = (b->flags & BB_FORWARDER_BLOCK) != 0;
4616 rtx_insn *insn;
4617
4618 gcc_checking_assert (cfg_layout_can_merge_blocks_p (a, b));
4619
4620 if (dump_file)
4621 fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
4622 a->index);
4623
4624 /* If there was a CODE_LABEL beginning B, delete it. */
4625 if (LABEL_P (BB_HEAD (b)))
4626 {
4627 delete_insn (BB_HEAD (b));
4628 }
4629
4630 /* We should have a fallthru edge in A, or we can do a dummy redirection to
4631 get it cleaned up. */
4632 if (JUMP_P (BB_END (a)))
4633 try_redirect_by_replacing_jump (EDGE_SUCC (a, 0), b, true);
4634 gcc_assert (!JUMP_P (BB_END (a)));
4635
4636 /* When not optimizing and the edge is the only place in RTL which holds
4637 some unique locus, emit a nop with that locus in between. */
4638 if (!optimize)
4639 emit_nop_for_unique_locus_between (a, b);
4640
4641 /* Move things from b->footer after a->footer. */
4642 if (BB_FOOTER (b))
4643 {
4644 if (!BB_FOOTER (a))
4645 BB_FOOTER (a) = BB_FOOTER (b);
4646 else
4647 {
4648 rtx_insn *last = BB_FOOTER (a);
4649
4650 while (NEXT_INSN (last))
4651 last = NEXT_INSN (last);
4652 SET_NEXT_INSN (last) = BB_FOOTER (b);
4653 SET_PREV_INSN (BB_FOOTER (b)) = last;
4654 }
4655 BB_FOOTER (b) = NULL;
4656 }
4657
4658 /* Move things from b->header before a->footer.
4659 Note that this may include dead tablejump data, but we don't clean
4660 those up until we go out of cfglayout mode. */
4661 if (BB_HEADER (b))
4662 {
4663 if (! BB_FOOTER (a))
4664 BB_FOOTER (a) = BB_HEADER (b);
4665 else
4666 {
4667 rtx_insn *last = BB_HEADER (b);
4668
4669 while (NEXT_INSN (last))
4670 last = NEXT_INSN (last);
4671 SET_NEXT_INSN (last) = BB_FOOTER (a);
4672 SET_PREV_INSN (BB_FOOTER (a)) = last;
4673 BB_FOOTER (a) = BB_HEADER (b);
4674 }
4675 BB_HEADER (b) = NULL;
4676 }
4677
4678 /* If the basic blocks are not adjacent, move the insns around. */
4679 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4680 {
4681 insn = unlink_insn_chain (BB_HEAD (b), BB_END (b));
4682
4683 emit_insn_after_noloc (insn, BB_END (a), a);
4684 }
4685 /* Otherwise just re-associate the instructions. */
4686 else
4687 {
4688 insn = BB_HEAD (b);
4689 BB_END (a) = BB_END (b);
4690 }
4691
4692 /* emit_insn_after_noloc doesn't call df_insn_change_bb.
4693 We need to call it explicitly. */
4694 update_bb_for_insn_chain (insn, BB_END (b), a);
4695
4696 /* Skip possible DELETED_LABEL insn. */
4697 if (!NOTE_INSN_BASIC_BLOCK_P (insn))
4698 insn = NEXT_INSN (insn);
4699 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4700 BB_HEAD (b) = BB_END (b) = NULL;
4701 delete_insn (insn);
4702
4703 df_bb_delete (b->index);
4704
4705 /* If B was a forwarder block, propagate the locus on the edge. */
4706 if (forwarder_p
4707 && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION)
4708 EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
4709
4710 if (dump_file)
4711 fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
4712 }
4713
4714 /* Split edge E. */
4715
4716 static basic_block
4717 cfg_layout_split_edge (edge e)
4718 {
4719 basic_block new_bb =
4720 create_basic_block (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4721 ? NEXT_INSN (BB_END (e->src)) : get_insns (),
4722 NULL_RTX, e->src);
4723
4724 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4725 BB_COPY_PARTITION (new_bb, e->src);
4726 else
4727 BB_COPY_PARTITION (new_bb, e->dest);
4728 make_edge (new_bb, e->dest, EDGE_FALLTHRU);
4729 redirect_edge_and_branch_force (e, new_bb);
4730
4731 return new_bb;
4732 }
4733
4734 /* Do postprocessing after making a forwarder block joined by edge FALLTHRU. */
4735
4736 static void
4737 rtl_make_forwarder_block (edge fallthru ATTRIBUTE_UNUSED)
4738 {
4739 }
4740
4741 /* Return true if BB contains only labels or non-executable
4742 instructions. */
4743
4744 static bool
4745 rtl_block_empty_p (basic_block bb)
4746 {
4747 rtx_insn *insn;
4748
4749 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4750 || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4751 return true;
4752
4753 FOR_BB_INSNS (bb, insn)
4754 if (NONDEBUG_INSN_P (insn) && !any_uncondjump_p (insn))
4755 return false;
4756
4757 return true;
4758 }
4759
4760 /* Split a basic block if it ends with a conditional branch and if
4761 the other part of the block is not empty. */
4762
4763 static basic_block
4764 rtl_split_block_before_cond_jump (basic_block bb)
4765 {
4766 rtx_insn *insn;
4767 rtx_insn *split_point = NULL;
4768 rtx_insn *last = NULL;
4769 bool found_code = false;
4770
4771 FOR_BB_INSNS (bb, insn)
4772 {
4773 if (any_condjump_p (insn))
4774 split_point = last;
4775 else if (NONDEBUG_INSN_P (insn))
4776 found_code = true;
4777 last = insn;
4778 }
4779
4780 /* Split only if we found both a condjump and some other real insn. */
4781 if (found_code && split_point)
4782 return split_block (bb, split_point)->dest;
4783 else
4784 return NULL;
4785 }
4786
4787 /* Return true if BB ends with a call, possibly followed by some
4788 instructions that must stay with the call, false otherwise. */
4789
4790 static bool
4791 rtl_block_ends_with_call_p (basic_block bb)
4792 {
4793 rtx_insn *insn = BB_END (bb);
4794
4795 while (!CALL_P (insn)
4796 && insn != BB_HEAD (bb)
4797 && (keep_with_call_p (insn)
4798 || NOTE_P (insn)
4799 || DEBUG_INSN_P (insn)))
4800 insn = PREV_INSN (insn);
4801 return (CALL_P (insn));
4802 }
4803
4804 /* Return true if BB ends with a conditional branch, false otherwise. */
4805
4806 static bool
4807 rtl_block_ends_with_condjump_p (const_basic_block bb)
4808 {
4809 return any_condjump_p (BB_END (bb));
4810 }
4811
4812 /* Return true if we need to add fake edge to exit.
4813 Helper function for rtl_flow_call_edges_add. */
4814
4815 static bool
4816 need_fake_edge_p (const rtx_insn *insn)
4817 {
4818 if (!INSN_P (insn))
4819 return false;
4820
4821 if ((CALL_P (insn)
4822 && !SIBLING_CALL_P (insn)
4823 && !find_reg_note (insn, REG_NORETURN, NULL)
4824 && !(RTL_CONST_OR_PURE_CALL_P (insn))))
4825 return true;
4826
4827 return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS
4828 && MEM_VOLATILE_P (PATTERN (insn)))
4829 || (GET_CODE (PATTERN (insn)) == PARALLEL
4830 && asm_noperands (insn) != -1
4831 && MEM_VOLATILE_P (XVECEXP (PATTERN (insn), 0, 0)))
4832 || GET_CODE (PATTERN (insn)) == ASM_INPUT);
4833 }
4834
4835 /* Add fake edges to the function exit for any non-constant and non-noreturn
4836 calls, or volatile inline assembly, in the bitmap of blocks specified by
4837 BLOCKS or to the whole CFG if BLOCKS is zero. Return the number of blocks
4838 that were split.
4839
4840 The goal is to expose cases in which entering a basic block does not imply
4841 that all subsequent instructions must be executed. */
4842
4843 static int
4844 rtl_flow_call_edges_add (sbitmap blocks)
4845 {
4846 int i;
4847 int blocks_split = 0;
4848 int last_bb = last_basic_block_for_fn (cfun);
4849 bool check_last_block = false;
4850
4851 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
4852 return 0;
4853
4854 if (! blocks)
4855 check_last_block = true;
4856 else
4857 check_last_block = bitmap_bit_p (blocks,
4858 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb->index);
4859
4860 /* In the last basic block, before epilogue generation, there will be
4861 a fallthru edge to EXIT. Special care is required if the last insn
4862 of the last basic block is a call because make_edge folds duplicate
4863 edges, which would result in the fallthru edge also being marked
4864 fake, which would result in the fallthru edge being removed by
4865 remove_fake_edges, which would result in an invalid CFG.
4866
4867 Moreover, we can't elide the outgoing fake edge, since the block
4868 profiler needs to take this into account in order to solve the minimal
4869 spanning tree in the case that the call doesn't return.
4870
4871 Handle this by adding a dummy instruction in a new last basic block. */
4872 if (check_last_block)
4873 {
4874 basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
4875 rtx_insn *insn = BB_END (bb);
4876
4877 /* Back up past insns that must be kept in the same block as a call. */
4878 while (insn != BB_HEAD (bb)
4879 && keep_with_call_p (insn))
4880 insn = PREV_INSN (insn);
4881
4882 if (need_fake_edge_p (insn))
4883 {
4884 edge e;
4885
4886 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
4887 if (e)
4888 {
4889 insert_insn_on_edge (gen_use (const0_rtx), e);
4890 commit_edge_insertions ();
4891 }
4892 }
4893 }
4894
4895 /* Now add fake edges to the function exit for any non-constant
4896 calls since there is no way that we can determine if they will
4897 return or not... */
4898
4899 for (i = NUM_FIXED_BLOCKS; i < last_bb; i++)
4900 {
4901 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
4902 rtx_insn *insn;
4903 rtx_insn *prev_insn;
4904
4905 if (!bb)
4906 continue;
4907
4908 if (blocks && !bitmap_bit_p (blocks, i))
4909 continue;
4910
4911 for (insn = BB_END (bb); ; insn = prev_insn)
4912 {
4913 prev_insn = PREV_INSN (insn);
4914 if (need_fake_edge_p (insn))
4915 {
4916 edge e;
4917 rtx_insn *split_at_insn = insn;
4918
4919 /* Don't split the block between a call and an insn that should
4920 remain in the same block as the call. */
4921 if (CALL_P (insn))
4922 while (split_at_insn != BB_END (bb)
4923 && keep_with_call_p (NEXT_INSN (split_at_insn)))
4924 split_at_insn = NEXT_INSN (split_at_insn);
4925
4926 /* The handling above of the final block before the epilogue
4927 should be enough to verify that there is no edge to the exit
4928 block in the CFG already. Calling make_edge in such a case would
4929 cause us to mark that edge as fake and remove it later. */
4930
4931 if (flag_checking && split_at_insn == BB_END (bb))
4932 {
4933 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
4934 gcc_assert (e == NULL);
4935 }
4936
4937 /* Note that the following may create a new basic block
4938 and renumber the existing basic blocks. */
4939 if (split_at_insn != BB_END (bb))
4940 {
4941 e = split_block (bb, split_at_insn);
4942 if (e)
4943 blocks_split++;
4944 }
4945
4946 edge ne = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
4947 ne->probability = profile_probability::guessed_never ();
4948 }
4949
4950 if (insn == BB_HEAD (bb))
4951 break;
4952 }
4953 }
4954
4955 if (blocks_split)
4956 verify_flow_info ();
4957
4958 return blocks_split;
4959 }
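/* This is exposed through the flow_call_edges_add cfg hook; a profiling
   or coverage pass would typically apply it to the whole CFG.  Sketch of
   a hypothetical caller:

     int n_split = rtl_flow_call_edges_add (NULL);  // NULL = all blocks
     if (n_split)
       ... block count and numbering changed; recompute dependent data ...
*/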
4960
4961 /* Add COMP_RTX as a condition at the end of COND_BB. FIRST_HEAD is
4962 the conditional branch target, SECOND_HEAD should be the fall-thru;
4963 there is no need to handle the fall-thru here, as the loop versioning
4964 code handles it. SECOND_HEAD exists because the tree-level hook needs
4965 it, and this hook should be of the same type. */
4966 static void
4967 rtl_lv_add_condition_to_bb (basic_block first_head,
4968 basic_block second_head ATTRIBUTE_UNUSED,
4969 basic_block cond_bb, void *comp_rtx)
4970 {
4971 rtx_code_label *label;
4972 rtx_insn *seq, *jump;
4973 rtx op0 = XEXP ((rtx)comp_rtx, 0);
4974 rtx op1 = XEXP ((rtx)comp_rtx, 1);
4975 enum rtx_code comp = GET_CODE ((rtx)comp_rtx);
4976 machine_mode mode;
4977
4978
4979 label = block_label (first_head);
4980 mode = GET_MODE (op0);
4981 if (mode == VOIDmode)
4982 mode = GET_MODE (op1);
4983
4984 start_sequence ();
4985 op0 = force_operand (op0, NULL_RTX);
4986 op1 = force_operand (op1, NULL_RTX);
4987 do_compare_rtx_and_jump (op0, op1, comp, 0, mode, NULL_RTX, NULL, label,
4988 profile_probability::uninitialized ());
4989 jump = get_last_insn ();
4990 JUMP_LABEL (jump) = label;
4991 LABEL_NUSES (label)++;
4992 seq = get_insns ();
4993 end_sequence ();
4994
4995 /* Add the new cond, in the new head. */
4996 emit_insn_after (seq, BB_END (cond_bb));
4997 }
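/* COMP_RTX is expected to be a comparison rtx whose operands this function
   forces into operand form; a caller might build it along these lines
   (a sketch; op0/op1 are assumed to be valid rtxes of matching mode):

     rtx cond = gen_rtx_LT (VOIDmode, op0, op1);
     rtl_lv_add_condition_to_bb (first_head, second_head, cond_bb, cond);

   This emits a compare-and-branch to FIRST_HEAD's label at the end of
   COND_BB.  */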
4998
4999
5000 /* Given a block B with a conditional branch at its end, store the
5001 branch edge in *BRANCH_EDGE and the fall-thru edge in
5002 *FALLTHRU_EDGE. */
5003 static void
5004 rtl_extract_cond_bb_edges (basic_block b, edge *branch_edge,
5005 edge *fallthru_edge)
5006 {
5007 edge e = EDGE_SUCC (b, 0);
5008
5009 if (e->flags & EDGE_FALLTHRU)
5010 {
5011 *fallthru_edge = e;
5012 *branch_edge = EDGE_SUCC (b, 1);
5013 }
5014 else
5015 {
5016 *branch_edge = e;
5017 *fallthru_edge = EDGE_SUCC (b, 1);
5018 }
5019 }
5020
5021 void
5022 init_rtl_bb_info (basic_block bb)
5023 {
5024 gcc_assert (!bb->il.x.rtl);
5025 bb->il.x.head_ = NULL;
5026 bb->il.x.rtl = ggc_cleared_alloc<rtl_bb_info> ();
5027 }
5028
5029 /* Returns true if it is possible to remove edge E by redirecting
5030 it to the destination of the other edge from E->src. */
5031
5032 static bool
5033 rtl_can_remove_branch_p (const_edge e)
5034 {
5035 const_basic_block src = e->src;
5036 const_basic_block target = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest;
5037 const rtx_insn *insn = BB_END (src);
5038 rtx set;
5039
5040 /* The conditions are taken from try_redirect_by_replacing_jump. */
5041 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
5042 return false;
5043
5044 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
5045 return false;
5046
5047 if (BB_PARTITION (src) != BB_PARTITION (target))
5048 return false;
5049
5050 if (!onlyjump_p (insn)
5051 || tablejump_p (insn, NULL, NULL))
5052 return false;
5053
5054 set = single_set (insn);
5055 if (!set || side_effects_p (set))
5056 return false;
5057
5058 return true;
5059 }
5060
5061 static basic_block
5062 rtl_duplicate_bb (basic_block bb, copy_bb_data *id)
5063 {
5064 bb = cfg_layout_duplicate_bb (bb, id);
5065 bb->aux = NULL;
5066 return bb;
5067 }
5068
5069 /* Do bookkeeping of basic block BB for the profile consistency checker.
5070 If AFTER_PASS is 0, do pre-pass accounting; if AFTER_PASS is 1, do
5071 post-pass accounting. Store the counts in RECORD. */
5072 static void
5073 rtl_account_profile_record (basic_block bb, int after_pass,
5074 struct profile_record *record)
5075 {
5076 rtx_insn *insn;
5077 FOR_BB_INSNS (bb, insn)
5078 if (INSN_P (insn))
5079 {
5080 record->size[after_pass] += insn_cost (insn, false);
5081 if (bb->count.initialized_p ())
5082 record->time[after_pass]
5083 += insn_cost (insn, true) * bb->count.to_gcov_type ();
5084 else if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
5085 record->time[after_pass]
5086 += insn_cost (insn, true) * bb->count.to_frequency (cfun);
5087 }
5088 }
5089
5090 /* Implementation of CFG manipulation for linearized RTL. */
5091 struct cfg_hooks rtl_cfg_hooks = {
5092 "rtl",
5093 rtl_verify_flow_info,
5094 rtl_dump_bb,
5095 rtl_dump_bb_for_graph,
5096 rtl_create_basic_block,
5097 rtl_redirect_edge_and_branch,
5098 rtl_redirect_edge_and_branch_force,
5099 rtl_can_remove_branch_p,
5100 rtl_delete_block,
5101 rtl_split_block,
5102 rtl_move_block_after,
5103 rtl_can_merge_blocks, /* can_merge_blocks_p */
5104 rtl_merge_blocks,
5105 rtl_predict_edge,
5106 rtl_predicted_by_p,
5107 cfg_layout_can_duplicate_bb_p,
5108 rtl_duplicate_bb,
5109 rtl_split_edge,
5110 rtl_make_forwarder_block,
5111 rtl_tidy_fallthru_edge,
5112 rtl_force_nonfallthru,
5113 rtl_block_ends_with_call_p,
5114 rtl_block_ends_with_condjump_p,
5115 rtl_flow_call_edges_add,
5116 NULL, /* execute_on_growing_pred */
5117 NULL, /* execute_on_shrinking_pred */
5118 NULL, /* duplicate loop for trees */
5119 NULL, /* lv_add_condition_to_bb */
5120 NULL, /* lv_adjust_loop_header_phi*/
5121 NULL, /* extract_cond_bb_edges */
5122 NULL, /* flush_pending_stmts */
5123 rtl_block_empty_p, /* block_empty_p */
5124 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5125 rtl_account_profile_record,
5126 };
5127
5128 /* Implementation of CFG manipulation for cfg layout RTL, where
5129 basic blocks connected via fallthru edges do not have to be adjacent.
5130 This representation will hopefully become the default one in a future
5131 version of the compiler. */
5132
5133 struct cfg_hooks cfg_layout_rtl_cfg_hooks = {
5134 "cfglayout mode",
5135 rtl_verify_flow_info_1,
5136 rtl_dump_bb,
5137 rtl_dump_bb_for_graph,
5138 cfg_layout_create_basic_block,
5139 cfg_layout_redirect_edge_and_branch,
5140 cfg_layout_redirect_edge_and_branch_force,
5141 rtl_can_remove_branch_p,
5142 cfg_layout_delete_block,
5143 cfg_layout_split_block,
5144 rtl_move_block_after,
5145 cfg_layout_can_merge_blocks_p,
5146 cfg_layout_merge_blocks,
5147 rtl_predict_edge,
5148 rtl_predicted_by_p,
5149 cfg_layout_can_duplicate_bb_p,
5150 cfg_layout_duplicate_bb,
5151 cfg_layout_split_edge,
5152 rtl_make_forwarder_block,
5153 NULL, /* tidy_fallthru_edge */
5154 rtl_force_nonfallthru,
5155 rtl_block_ends_with_call_p,
5156 rtl_block_ends_with_condjump_p,
5157 rtl_flow_call_edges_add,
5158 NULL, /* execute_on_growing_pred */
5159 NULL, /* execute_on_shrinking_pred */
5160 duplicate_loop_to_header_edge, /* duplicate loop for trees */
5161 rtl_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
5162 NULL, /* lv_adjust_loop_header_phi*/
5163 rtl_extract_cond_bb_edges, /* extract_cond_bb_edges */
5164 NULL, /* flush_pending_stmts */
5165 rtl_block_empty_p, /* block_empty_p */
5166 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5167 rtl_account_profile_record,
5168 };
5169
5170 #include "gt-cfgrtl.h"
5171