1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22 /* This file contains the control flow optimizer. The main entry point is
23 cleanup_cfg. The following optimizations are performed:
24
25 - Unreachable blocks removal
26 - Edge forwarding (an edge to a forwarder block is forwarded to its
27 successor. Simplification of the branch instruction is performed by the
28 underlying infrastructure, so the branch can be converted to a simplejump
29 or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
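
/* For illustration (an assumed example, not part of the original
   comment), the conditional jump-around-simplejump case turns

	if (cond) goto L1;
	goto L2;
     L1:
	...

   into

	if (!cond) goto L2;
     L1:
	...

   by inverting the condition, after which the block holding the
   unconditional jump is dead and is deleted.  */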
33
34 #include "config.h"
35 #include "system.h"
36 #include "coretypes.h"
37 #include "tm.h"
38 #include "rtl.h"
39 #include "hard-reg-set.h"
40 #include "basic-block.h"
41 #include "timevar.h"
42 #include "output.h"
43 #include "insn-config.h"
44 #include "flags.h"
45 #include "recog.h"
46 #include "toplev.h"
47 #include "cselib.h"
48 #include "params.h"
49 #include "tm_p.h"
50 #include "target.h"
51 #include "expr.h"
52
53 /* cleanup_cfg maintains the following flags for each basic block. */
54
55 enum bb_flags
56 {
57 /* Set if BB is a forwarder block, to avoid repeated
58 forwarder_block_p calls. */
59 BB_FORWARDER_BLOCK = 1,
60 BB_NONTHREADABLE_BLOCK = 2
61 };
62
63 #define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
64 #define BB_SET_FLAG(BB, FLAG) \
65 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
66 #define BB_CLEAR_FLAG(BB, FLAG) \
67 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))
68
69 #define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
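
/* A minimal usage sketch (illustrative only): the flags are packed into
   the otherwise unused AUX field of a basic block, so

	BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
	if (FORWARDER_BLOCK_P (bb))	-> nonzero, the bit is set
	  ...
	BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);

   Because AUX is only borrowed, try_optimize_cfg clears it again via
   clear_aux_for_blocks () before returning.  */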
70
71 static bool try_crossjump_to_edge (int, edge, edge);
72 static bool try_crossjump_bb (int, basic_block);
73 static bool outgoing_edges_match (int, basic_block, basic_block);
74 static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
75 static bool insns_match_p (int, rtx, rtx);
76
77 static bool tail_recursion_label_p (rtx);
78 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
79 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
80 static bool try_optimize_cfg (int);
81 static bool try_simplify_condjump (basic_block);
82 static bool try_forward_edges (int, basic_block);
83 static edge thread_jump (int, edge, basic_block);
84 static bool mark_effect (rtx, bitmap);
85 static void notice_new_block (basic_block);
86 static void update_forwarder_flag (basic_block);
87 static int mentions_nonequal_regs (rtx *, void *);
88 static void merge_memattrs (rtx, rtx);
89
90 /* Set the flags for a newly created block. */
91
92 static void
93 notice_new_block (basic_block bb)
94 {
95 if (!bb)
96 return;
97
98 if (forwarder_block_p (bb))
99 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
100 }
101
102 /* Recompute the forwarder flag after a block has been modified. */
103
104 static void
105 update_forwarder_flag (basic_block bb)
106 {
107 if (forwarder_block_p (bb))
108 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
109 else
110 BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
111 }
112
113 /* Simplify a conditional jump around an unconditional jump.
114 Return true if something changed. */
115
116 static bool
117 try_simplify_condjump (basic_block cbranch_block)
118 {
119 basic_block jump_block, jump_dest_block, cbranch_dest_block;
120 edge cbranch_jump_edge, cbranch_fallthru_edge;
121 rtx cbranch_insn;
122 rtx insn, next;
123 rtx end;
124
125 /* Verify that there are exactly two successors. */
126 if (!cbranch_block->succ
127 || !cbranch_block->succ->succ_next
128 || cbranch_block->succ->succ_next->succ_next)
129 return false;
130
131 /* Verify that we've got a normal conditional branch at the end
132 of the block. */
133 cbranch_insn = BB_END (cbranch_block);
134 if (!any_condjump_p (cbranch_insn))
135 return false;
136
137 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
138 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
139
140 /* The next block must not have multiple predecessors, must not
141 be the last block in the function, and must contain just the
142 unconditional jump. */
143 jump_block = cbranch_fallthru_edge->dest;
144 if (jump_block->pred->pred_next
145 || jump_block->next_bb == EXIT_BLOCK_PTR
146 || !FORWARDER_BLOCK_P (jump_block))
147 return false;
148 jump_dest_block = jump_block->succ->dest;
149
150 /* The conditional branch must target the block after the
151 unconditional branch. */
152 cbranch_dest_block = cbranch_jump_edge->dest;
153
154 if (!can_fallthru (jump_block, cbranch_dest_block))
155 return false;
156
157 /* Invert the conditional branch. */
158 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
159 return false;
160
161 if (rtl_dump_file)
162 fprintf (rtl_dump_file, "Simplifying condjump %i around jump %i\n",
163 INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
164
165 /* Success. Update the CFG to match. Note that after this point
166 the edge variable names appear backwards; the redirection is done
167 this way to preserve edge profile data. */
168 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
169 cbranch_dest_block);
170 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
171 jump_dest_block);
172 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
173 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
174 update_br_prob_note (cbranch_block);
175
176 end = BB_END (jump_block);
177 /* Deleting a block may produce an unreachable-code warning even when we are
178 not deleting anything live. Suppress it by moving all the line number
179 notes out of the block. */
180 for (insn = BB_HEAD (jump_block); insn != NEXT_INSN (BB_END (jump_block));
181 insn = next)
182 {
183 next = NEXT_INSN (insn);
184 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
185 {
186 if (insn == BB_END (jump_block))
187 {
188 BB_END (jump_block) = PREV_INSN (insn);
189 if (insn == end)
190 break;
191 }
192 reorder_insns_nobb (insn, insn, end);
193 end = insn;
194 }
195 }
196 /* Delete the block with the unconditional jump, and clean up the mess. */
197 delete_block (jump_block);
198 tidy_fallthru_edge (cbranch_jump_edge, cbranch_block, cbranch_dest_block);
199
200 return true;
201 }
202
203 /* Attempt to prove that the operation is a NOOP using CSElib; otherwise
204 mark its effect on the register. Used by jump threading. */
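
/* For instance (illustrative examples): (clobber (reg 65)) marks reg 65
   as equal again, since its value is dead and need not match;
   (set (reg 65) (reg 66)) marks reg 65 as nonequal, unless cselib can
   prove the source and destination already hold the same value, in
   which case the insn is a proven NOOP; a SET with a non-register
   destination makes the block unanalyzable and fails.  */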
205
206 static bool
207 mark_effect (rtx exp, regset nonequal)
208 {
209 int regno;
210 rtx dest;
211 switch (GET_CODE (exp))
212 {
213 /* In case we do clobber the register, mark it as equal, as we know the
214 value is dead and so does not have to match. */
215 case CLOBBER:
216 if (REG_P (XEXP (exp, 0)))
217 {
218 dest = XEXP (exp, 0);
219 regno = REGNO (dest);
220 CLEAR_REGNO_REG_SET (nonequal, regno);
221 if (regno < FIRST_PSEUDO_REGISTER)
222 {
223 int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
224 while (--n > 0)
225 CLEAR_REGNO_REG_SET (nonequal, regno + n);
226 }
227 }
228 return false;
229
230 case SET:
231 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
232 return false;
233 dest = SET_DEST (exp);
234 if (dest == pc_rtx)
235 return false;
236 if (!REG_P (dest))
237 return true;
238 regno = REGNO (dest);
239 SET_REGNO_REG_SET (nonequal, regno);
240 if (regno < FIRST_PSEUDO_REGISTER)
241 {
242 int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
243 while (--n > 0)
244 SET_REGNO_REG_SET (nonequal, regno + n);
245 }
246 return false;
247
248 default:
249 return false;
250 }
251 }
252
253 /* Return nonzero if X is a register set in regset DATA.
254 Called via for_each_rtx. */
255 static int
256 mentions_nonequal_regs (rtx *x, void *data)
257 {
258 regset nonequal = (regset) data;
259 if (REG_P (*x))
260 {
261 int regno;
262
263 regno = REGNO (*x);
264 if (REGNO_REG_SET_P (nonequal, regno))
265 return 1;
266 if (regno < FIRST_PSEUDO_REGISTER)
267 {
268 int n = HARD_REGNO_NREGS (regno, GET_MODE (*x));
269 while (--n > 0)
270 if (REGNO_REG_SET_P (nonequal, regno + n))
271 return 1;
272 }
273 }
274 return 0;
275 }
276 /* Attempt to prove that the basic block B will have no side effects and
277 always continues along the same edge when reached via E. Return the edge
278 if it exists, NULL otherwise. */
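
/* An assumed example of a threadable shape:

	if (a < 0) goto t;	t: if (a != 0) goto u;

   Both conditions use the same operands and LT dominates NE, so
   arriving at T along the taken edge guarantees the second branch is
   taken as well; if T has no side effects, the first edge can be
   redirected straight to U.  */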
279
280 static edge
281 thread_jump (int mode, edge e, basic_block b)
282 {
283 rtx set1, set2, cond1, cond2, insn;
284 enum rtx_code code1, code2, reversed_code2;
285 bool reverse1 = false;
286 int i;
287 regset nonequal;
288 bool failed = false;
289
290 if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
291 return NULL;
292
293 /* At the moment we handle only conditional jumps, but later we may
294 want to extend this code to tablejumps and others. */
295 if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
296 return NULL;
297 if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
298 {
299 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
300 return NULL;
301 }
302
303 /* Second branch must end with onlyjump, as we will eliminate the jump. */
304 if (!any_condjump_p (BB_END (e->src)))
305 return NULL;
306
307 if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
308 {
309 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
310 return NULL;
311 }
312
313 set1 = pc_set (BB_END (e->src));
314 set2 = pc_set (BB_END (b));
315 if (((e->flags & EDGE_FALLTHRU) != 0)
316 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
317 reverse1 = true;
318
319 cond1 = XEXP (SET_SRC (set1), 0);
320 cond2 = XEXP (SET_SRC (set2), 0);
321 if (reverse1)
322 code1 = reversed_comparison_code (cond1, BB_END (e->src));
323 else
324 code1 = GET_CODE (cond1);
325
326 code2 = GET_CODE (cond2);
327 reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
328
329 if (!comparison_dominates_p (code1, code2)
330 && !comparison_dominates_p (code1, reversed_code2))
331 return NULL;
332
333 /* Ensure that the comparison operators are equivalent.
334 ??? This is far too pessimistic. We should allow swapped operands,
335 different CCmodes, or for example interval comparisons that
336 dominate even when the operands are not equivalent. */
337 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
338 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
339 return NULL;
340
341 /* Short circuit cases where block B contains some side effects, as we can't
342 safely bypass it. */
343 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
344 insn = NEXT_INSN (insn))
345 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
346 {
347 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
348 return NULL;
349 }
350
351 cselib_init ();
352
353 /* First process all values computed in the source basic block. */
354 for (insn = NEXT_INSN (BB_HEAD (e->src)); insn != NEXT_INSN (BB_END (e->src));
355 insn = NEXT_INSN (insn))
356 if (INSN_P (insn))
357 cselib_process_insn (insn);
358
359 nonequal = BITMAP_XMALLOC();
360 CLEAR_REG_SET (nonequal);
361
362 /* Now assume that we've continued along the edge E to B and continue
363 processing as if it were the same basic block.
364 Our goal is to prove that the whole block is a NOOP. */
365
366 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b)) && !failed;
367 insn = NEXT_INSN (insn))
368 {
369 if (INSN_P (insn))
370 {
371 rtx pat = PATTERN (insn);
372
373 if (GET_CODE (pat) == PARALLEL)
374 {
375 for (i = 0; i < XVECLEN (pat, 0); i++)
376 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
377 }
378 else
379 failed |= mark_effect (pat, nonequal);
380 }
381
382 cselib_process_insn (insn);
383 }
384
385 /* Later we should clear the dead registers from NONEQUAL. So far we don't
386 have liveness information in cfg_cleanup. */
387 if (failed)
388 {
389 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
390 goto failed_exit;
391 }
392
393 /* cond2 must not mention any register that is not known to hold the
394 same value as in the former block. */
395 if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
396 goto failed_exit;
397
398 /* In case liveness information is available, we need to prove equivalence
399 only of the live values. */
400 if (mode & CLEANUP_UPDATE_LIFE)
401 AND_REG_SET (nonequal, b->global_live_at_end);
402
403 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);
404
405 BITMAP_XFREE (nonequal);
406 cselib_finish ();
407 if ((comparison_dominates_p (code1, code2) != 0)
408 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
409 return BRANCH_EDGE (b);
410 else
411 return FALLTHRU_EDGE (b);
412
413 failed_exit:
414 BITMAP_XFREE (nonequal);
415 cselib_finish ();
416 return NULL;
417 }
418
419 /* Attempt to forward edges leaving basic block B.
420 Return true if successful. */
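
/* Illustration (assumed block shapes): given the chain

	B -> F1 -> F2 -> T

   where F1 and F2 are forwarder blocks containing nothing but a label
   and a jump, the outgoing edge of B is redirected straight to T.  The
   walk is bounded by n_basic_blocks, so a circular chain of forwarders
   cannot make it loop forever.  */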
421
422 static bool
423 try_forward_edges (int mode, basic_block b)
424 {
425 bool changed = false;
426 edge e, next, *threaded_edges = NULL;
427
428 for (e = b->succ; e; e = next)
429 {
430 basic_block target, first;
431 int counter;
432 bool threaded = false;
433 int nthreaded_edges = 0;
434
435 next = e->succ_next;
436
437 /* Skip complex edges because we don't know how to update them.
438
439 Still handle fallthru edges, as we may succeed in forwarding a fallthru
440 edge to the same place as the branch edge of a conditional branch
441 and turn the conditional branch into an unconditional branch. */
442 if (e->flags & EDGE_COMPLEX)
443 continue;
444
445 target = first = e->dest;
446 counter = 0;
447
448 while (counter < n_basic_blocks)
449 {
450 basic_block new_target = NULL;
451 bool new_target_threaded = false;
452
453 if (FORWARDER_BLOCK_P (target)
454 && target->succ->dest != EXIT_BLOCK_PTR)
455 {
456 /* Bypass trivial infinite loops. */
457 if (target == target->succ->dest)
458 counter = n_basic_blocks;
459 new_target = target->succ->dest;
460 }
461
462 /* Allow threading over at most one edge at a time to simplify updating
463 of probabilities. */
464 else if (mode & CLEANUP_THREADING)
465 {
466 edge t = thread_jump (mode, e, target);
467 if (t)
468 {
469 if (!threaded_edges)
470 threaded_edges = xmalloc (sizeof (*threaded_edges)
471 * n_basic_blocks);
472 else
473 {
474 int i;
475
476 /* Detect an infinite loop across blocks not
477 including the start block. */
478 for (i = 0; i < nthreaded_edges; ++i)
479 if (threaded_edges[i] == t)
480 break;
481 if (i < nthreaded_edges)
482 {
483 counter = n_basic_blocks;
484 break;
485 }
486 }
487
488 /* Detect an infinite loop across the start block. */
489 if (t->dest == b)
490 break;
491
492 if (nthreaded_edges >= n_basic_blocks)
493 abort ();
494 threaded_edges[nthreaded_edges++] = t;
495
496 new_target = t->dest;
497 new_target_threaded = true;
498 }
499 }
500
501 if (!new_target)
502 break;
503
504 /* Avoid killing loop pre-headers, as they are where the loop
505 optimizer wants to hoist code to.
506
507 For fallthru forwarders, the LOOP_BEG note must appear between
508 the header of the block and the CODE_LABEL of the loop; for non-forwarders
509 it must appear before the JUMP_INSN. */
510 if ((mode & CLEANUP_PRE_LOOP) && optimize)
511 {
512 rtx insn = (target->succ->flags & EDGE_FALLTHRU
513 ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target)));
514
515 if (GET_CODE (insn) != NOTE)
516 insn = NEXT_INSN (insn);
517
518 for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
519 insn = NEXT_INSN (insn))
520 if (GET_CODE (insn) == NOTE
521 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
522 break;
523
524 if (GET_CODE (insn) == NOTE)
525 break;
526
527 /* Do not clean up branches to just past the end of a loop
528 at this time; it can mess up the loop optimizer's
529 recognition of some patterns. */
530
531 insn = PREV_INSN (BB_HEAD (target));
532 if (insn && GET_CODE (insn) == NOTE
533 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
534 break;
535 }
536
537 counter++;
538 target = new_target;
539 threaded |= new_target_threaded;
540 }
541
542 if (counter >= n_basic_blocks)
543 {
544 if (rtl_dump_file)
545 fprintf (rtl_dump_file, "Infinite loop in BB %i.\n",
546 target->index);
547 }
548 else if (target == first)
549 ; /* We didn't do anything. */
550 else
551 {
552 /* Save the values now, as the edge may get removed. */
553 gcov_type edge_count = e->count;
554 int edge_probability = e->probability;
555 int edge_frequency;
556 int n = 0;
557
558 /* Don't force if target is exit block. */
559 if (threaded && target != EXIT_BLOCK_PTR)
560 {
561 notice_new_block (redirect_edge_and_branch_force (e, target));
562 if (rtl_dump_file)
563 fprintf (rtl_dump_file, "Conditionals threaded.\n");
564 }
565 else if (!redirect_edge_and_branch (e, target))
566 {
567 if (rtl_dump_file)
568 fprintf (rtl_dump_file,
569 "Forwarding edge %i->%i to %i failed.\n",
570 b->index, e->dest->index, target->index);
571 continue;
572 }
573
574 /* We successfully forwarded the edge. Now update profile
575 data: for each edge we traversed in the chain, remove
576 the original edge's execution count. */
577 edge_frequency = ((edge_probability * b->frequency
578 + REG_BR_PROB_BASE / 2)
579 / REG_BR_PROB_BASE);
580
581 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
582 BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
583
584 do
585 {
586 edge t;
587
588 first->count -= edge_count;
589 if (first->count < 0)
590 first->count = 0;
591 first->frequency -= edge_frequency;
592 if (first->frequency < 0)
593 first->frequency = 0;
594 if (first->succ->succ_next)
595 {
596 edge e;
597 int prob;
598 if (n >= nthreaded_edges)
599 abort ();
600 t = threaded_edges [n++];
601 if (t->src != first)
602 abort ();
603 if (first->frequency)
604 prob = edge_frequency * REG_BR_PROB_BASE / first->frequency;
605 else
606 prob = 0;
607 if (prob > t->probability)
608 prob = t->probability;
609 t->probability -= prob;
610 prob = REG_BR_PROB_BASE - prob;
611 if (prob <= 0)
612 {
613 first->succ->probability = REG_BR_PROB_BASE;
614 first->succ->succ_next->probability = 0;
615 }
616 else
617 for (e = first->succ; e; e = e->succ_next)
618 e->probability = ((e->probability * REG_BR_PROB_BASE)
619 / (double) prob);
620 update_br_prob_note (first);
621 }
622 else
623 {
624 /* It is possible that, as the result of
625 threading, we've removed the edge, as it was
626 threaded to the fallthru edge. Avoid
627 getting out of sync. */
628 if (n < nthreaded_edges
629 && first == threaded_edges [n]->src)
630 n++;
631 t = first->succ;
632 }
633
634 t->count -= edge_count;
635 if (t->count < 0)
636 t->count = 0;
637 first = t->dest;
638 }
639 while (first != target);
640
641 changed = true;
642 }
643 }
644
645 if (threaded_edges)
646 free (threaded_edges);
647 return changed;
648 }
649
650 /* Return true if LABEL is used for tail recursion. */
651
652 static bool
653 tail_recursion_label_p (rtx label)
654 {
655 rtx x;
656
657 for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
658 if (label == XEXP (x, 0))
659 return true;
660
661 return false;
662 }
663
664 /* Blocks A and B are to be merged into a single block. A has no incoming
665 fallthru edge, so it can be moved before B without adding or modifying
666 any jumps (aside from the jump from A to B). */
667
668 static void
669 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
670 {
671 rtx barrier;
672
673 barrier = next_nonnote_insn (BB_END (a));
674 if (GET_CODE (barrier) != BARRIER)
675 abort ();
676 delete_insn (barrier);
677
678 /* Move block and loop notes out of the chain so that we do not
679 disturb their order.
680
681 ??? A better solution would be to squeeze out all the non-nested notes
682 and adjust the block trees appropriately. Even better would be to have
683 a tighter connection between block trees and rtl so that this is not
684 necessary. */
685 if (squeeze_notes (&BB_HEAD (a), &BB_END (a)))
686 abort ();
687
688 /* Scramble the insn chain. */
689 if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
690 reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
691 a->flags |= BB_DIRTY;
692
693 if (rtl_dump_file)
694 fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
695 a->index, b->index);
696
697 /* Swap the records for the two blocks around. */
698
699 unlink_block (a);
700 link_block (a, b->prev_bb);
701
702 /* Now blocks A and B are contiguous. Merge them. */
703 merge_blocks (a, b);
704 }
705
706 /* Blocks A and B are to be merged into a single block. B has no outgoing
707 fallthru edge, so it can be moved after A without adding or modifying
708 any jumps (aside from the jump from A to B). */
709
710 static void
711 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
712 {
713 rtx barrier, real_b_end;
714 rtx label, table;
715
716 real_b_end = BB_END (b);
717
718 /* If there is a jump table following block B, temporarily add the jump table
719 to block B so that it will also be moved to the correct location. */
720 if (tablejump_p (BB_END (b), &label, &table)
721 && prev_active_insn (label) == BB_END (b))
722 {
723 BB_END (b) = table;
724 }
725
726 /* There had better have been a barrier there. Delete it. */
727 barrier = NEXT_INSN (BB_END (b));
728 if (barrier && GET_CODE (barrier) == BARRIER)
729 delete_insn (barrier);
730
731 /* Move block and loop notes out of the chain so that we do not
732 disturb their order.
733
734 ??? A better solution would be to squeeze out all the non-nested notes
735 and adjust the block trees appropriately. Even better would be to have
736 a tighter connection between block trees and rtl so that this is not
737 necessary. */
738 if (squeeze_notes (&BB_HEAD (b), &BB_END (b)))
739 abort ();
740
741 /* Scramble the insn chain. */
742 reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
743
744 /* Restore the real end of b. */
745 BB_END (b) = real_b_end;
746
747 if (rtl_dump_file)
748 fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
749 b->index, a->index);
750
751 /* Now blocks A and B are contiguous. Merge them. */
752 merge_blocks (a, b);
753 }
754
755 /* Attempt to merge basic blocks that are potentially non-adjacent.
756 Return NULL iff the attempt failed, otherwise return the basic block
757 where cleanup_cfg should continue. Because the merging commonly
758 moves a basic block away or introduces another optimization
759 possibility, return the basic block just before B so that cleanup_cfg
760 doesn't need to iterate.
761
762 It may be a good idea to return the basic block before C in the case
763 where C has been moved after B and originally appeared earlier in the
764 insn sequence, but we have no information available about the
765 relative ordering of these two. Hopefully it is not too common. */
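
/* Sketch of the two strategies used below (illustrative): when C has no
   outgoing fallthru edge, C is moved up to sit immediately after B;
   otherwise, when B has no incoming fallthru edge (possibly after
   forcing one away with force_nonfallthru), B is moved down to sit
   immediately before C.  Either way the two blocks become adjacent and
   merge_blocks can combine them.  */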
766
767 static basic_block
768 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
769 {
770 basic_block next;
771 /* If C has a tail recursion label, do not merge. There is no
772 edge recorded from the call_placeholder back to this label, as
773 that would make optimize_sibling_and_tail_recursive_calls more
774 complex for no gain. */
775 if ((mode & CLEANUP_PRE_SIBCALL)
776 && GET_CODE (BB_HEAD (c)) == CODE_LABEL
777 && tail_recursion_label_p (BB_HEAD (c)))
778 return NULL;
779
780 /* If B has a fallthru edge to C, no need to move anything. */
781 if (e->flags & EDGE_FALLTHRU)
782 {
783 int b_index = b->index, c_index = c->index;
784 merge_blocks (b, c);
785 update_forwarder_flag (b);
786
787 if (rtl_dump_file)
788 fprintf (rtl_dump_file, "Merged %d and %d without moving.\n",
789 b_index, c_index);
790
791 return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
792 }
793
794 /* Otherwise we will need to move code around. Do that only if expensive
795 transformations are allowed. */
796 else if (mode & CLEANUP_EXPENSIVE)
797 {
798 edge tmp_edge, b_fallthru_edge;
799 bool c_has_outgoing_fallthru;
800 bool b_has_incoming_fallthru;
801
802 /* Avoid overactive code motion, as the forwarder blocks should be
803 eliminated by edge redirection instead. One exception might have
804 been if B is a forwarder block and C has no fallthru edge, but
805 that should be cleaned up by bb-reorder instead. */
806 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
807 return NULL;
808
809 /* We must make sure to not munge nesting of lexical blocks,
810 and loop notes. This is done by squeezing out all the notes
811 and leaving them there to lie. Not ideal, but functional. */
812
813 for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
814 if (tmp_edge->flags & EDGE_FALLTHRU)
815 break;
816
817 c_has_outgoing_fallthru = (tmp_edge != NULL);
818
819 for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
820 if (tmp_edge->flags & EDGE_FALLTHRU)
821 break;
822
823 b_has_incoming_fallthru = (tmp_edge != NULL);
824 b_fallthru_edge = tmp_edge;
825 next = b->prev_bb;
826 if (next == c)
827 next = next->prev_bb;
828
829 /* Otherwise, we're going to try to move C after B. If C does
830 not have an outgoing fallthru, then it can be moved
831 immediately after B without introducing or modifying jumps. */
832 if (! c_has_outgoing_fallthru)
833 {
834 merge_blocks_move_successor_nojumps (b, c);
835 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
836 }
837
838 /* If B does not have an incoming fallthru, then it can be moved
839 immediately before C without introducing or modifying jumps.
840 C cannot be the first block, so we do not have to worry about
841 accessing a non-existent block. */
842
843 if (b_has_incoming_fallthru)
844 {
845 basic_block bb;
846
847 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
848 return NULL;
849 bb = force_nonfallthru (b_fallthru_edge);
850 if (bb)
851 notice_new_block (bb);
852 }
853
854 merge_blocks_move_predecessor_nojumps (b, c);
855 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
856 }
857
858 return NULL;
859 }
860
861
862 /* Remove the memory attributes of the MEM expressions X and Y
863 wherever they are not equal. */
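
/* For example (illustrative): if X and Y are equivalent loads but one
   was derived from a structure field and the other from a plain pointer
   dereference, their MEM_EXPRs disagree and both are cleared, along
   with the offsets; differing alias sets are reset to 0, while size and
   alignment are merged conservatively (MAX of the sizes, MIN of the
   alignments).  */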
864
865 void
866 merge_memattrs (rtx x, rtx y)
867 {
868 int i;
869 int j;
870 enum rtx_code code;
871 const char *fmt;
872
873 if (x == y)
874 return;
875 if (x == 0 || y == 0)
876 return;
877
878 code = GET_CODE (x);
879
880 if (code != GET_CODE (y))
881 return;
882
883 if (GET_MODE (x) != GET_MODE (y))
884 return;
885
886 if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
887 {
888 if (! MEM_ATTRS (x))
889 MEM_ATTRS (y) = 0;
890 else if (! MEM_ATTRS (y))
891 MEM_ATTRS (x) = 0;
892 else
893 {
894 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
895 {
896 set_mem_alias_set (x, 0);
897 set_mem_alias_set (y, 0);
898 }
899
900 if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
901 {
902 set_mem_expr (x, 0);
903 set_mem_expr (y, 0);
904 set_mem_offset (x, 0);
905 set_mem_offset (y, 0);
906 }
907 else if (MEM_OFFSET (x) != MEM_OFFSET (y))
908 {
909 set_mem_offset (x, 0);
910 set_mem_offset (y, 0);
911 }
912
913 set_mem_size (x, MAX (MEM_SIZE (x), MEM_SIZE (y)));
914 set_mem_size (y, MEM_SIZE (x));
915
916 set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
917 set_mem_align (y, MEM_ALIGN (x));
918 }
919 }
920
921 fmt = GET_RTX_FORMAT (code);
922 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
923 {
924 switch (fmt[i])
925 {
926 case 'E':
927 /* Two vectors must have the same length. */
928 if (XVECLEN (x, i) != XVECLEN (y, i))
929 return;
930
931 for (j = 0; j < XVECLEN (x, i); j++)
932 merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
933
934 break;
935
936 case 'e':
937 merge_memattrs (XEXP (x, i), XEXP (y, i));
938 }
939 }
940 return;
941 }
942
943
944 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
945
946 static bool
947 insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
948 {
949 rtx p1, p2;
950
951 /* Verify that I1 and I2 are equivalent. */
952 if (GET_CODE (i1) != GET_CODE (i2))
953 return false;
954
955 p1 = PATTERN (i1);
956 p2 = PATTERN (i2);
957
958 if (GET_CODE (p1) != GET_CODE (p2))
959 return false;
960
961 /* If this is a CALL_INSN, compare register usage information.
962 If we don't check this on stack register machines, the two
963 CALL_INSNs might be merged leaving reg-stack.c with mismatching
964 numbers of stack registers in the same basic block.
965 If we don't check this on machines with delay slots, a delay slot may
966 be filled that clobbers a parameter expected by the subroutine.
967
968 ??? We take the simple route for now and assume that if they're
969 equal, they were constructed identically. */
970
971 if (GET_CODE (i1) == CALL_INSN
972 && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
973 CALL_INSN_FUNCTION_USAGE (i2))
974 || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
975 return false;
976
977 #ifdef STACK_REGS
978 /* If cross_jump_death_matters is not 0, the insn's mode
979 indicates whether or not the insn contains any stack-like
980 regs. */
981
982 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
983 {
984 /* If register stack conversion has already been done, then
985 death notes must also be compared before it is certain that
986 the two instruction streams match. */
987
988 rtx note;
989 HARD_REG_SET i1_regset, i2_regset;
990
991 CLEAR_HARD_REG_SET (i1_regset);
992 CLEAR_HARD_REG_SET (i2_regset);
993
994 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
995 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
996 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
997
998 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
999 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1000 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1001
1002 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
1003
1004 return false;
1005
1006 done:
1007 ;
1008 }
1009 #endif
1010
1011 if (reload_completed
1012 ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1013 return true;
1014
1015 /* Do not do EQUIV substitution after reload. First, we're undoing the
1016 work of reload_cse. Second, we may be undoing the work of the post-
1017 reload splitting pass. */
1018 /* ??? Possibly add a new phase switch variable that can be used by
1019 targets to disallow the troublesome insns after splitting. */
1020 if (!reload_completed)
1021 {
1022 /* The following code helps take care of G++ cleanups. */
1023 rtx equiv1 = find_reg_equal_equiv_note (i1);
1024 rtx equiv2 = find_reg_equal_equiv_note (i2);
1025
1026 if (equiv1 && equiv2
1027 /* If the equivalences are not to a constant, they may
1028 reference pseudos that no longer exist, so we can't
1029 use them. */
1030 && (! reload_completed
1031 || (CONSTANT_P (XEXP (equiv1, 0))
1032 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
1033 {
1034 rtx s1 = single_set (i1);
1035 rtx s2 = single_set (i2);
1036 if (s1 != 0 && s2 != 0
1037 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
1038 {
1039 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
1040 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
1041 if (! rtx_renumbered_equal_p (p1, p2))
1042 cancel_changes (0);
1043 else if (apply_change_group ())
1044 return true;
1045 }
1046 }
1047 }
1048
1049 return false;
1050 }
1051
1052 /* Look through the insns at the end of BB1 and BB2 and find the longest
1053 sequence that is equivalent. Store the first insns for that sequence
1054 in *F1 and *F2 and return the sequence length.
1055
1056 To simplify callers of this function, if the blocks match exactly,
1057 store the head of the blocks in *F1 and *F2. */
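
/* Illustration (assumed insns): if BB1 and BB2 both end in

	(set (reg 65) (plus (reg 66) (const_int 1)))
	(set (mem ...) (reg 65))
	(jump_insn ...)

   the backward walk matches the store and the addition, returns 2 (the
   unconditional jump itself is not counted), and leaves *F1 and *F2
   pointing at the first insn of each matched sequence.  */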
1058
1059 static int
1060 flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
1061 basic_block bb2, rtx *f1, rtx *f2)
1062 {
1063 rtx i1, i2, last1, last2, afterlast1, afterlast2;
1064 int ninsns = 0;
1065
1066 /* Skip simple jumps at the end of the blocks. Complex jumps still
1067 need to be compared for equivalence, which we'll do below. */
1068
1069 i1 = BB_END (bb1);
1070 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1071 if (onlyjump_p (i1)
1072 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1073 {
1074 last1 = i1;
1075 i1 = PREV_INSN (i1);
1076 }
1077
1078 i2 = BB_END (bb2);
1079 if (onlyjump_p (i2)
1080 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1081 {
1082 last2 = i2;
1083 /* Count everything except the unconditional jump as an insn. */
1084 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1085 ninsns++;
1086 i2 = PREV_INSN (i2);
1087 }
1088
1089 while (true)
1090 {
1091 /* Ignore notes. */
1092 while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
1093 i1 = PREV_INSN (i1);
1094
1095 while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
1096 i2 = PREV_INSN (i2);
1097
1098 if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1099 break;
1100
1101 if (!insns_match_p (mode, i1, i2))
1102 break;
1103
1104 merge_memattrs (i1, i2);
1105
1106 /* Don't begin a cross-jump with a NOTE insn. */
1107 if (INSN_P (i1))
1108 {
1109 /* If the merged insns have different REG_EQUAL notes, then
1110 remove them. */
1111 rtx equiv1 = find_reg_equal_equiv_note (i1);
1112 rtx equiv2 = find_reg_equal_equiv_note (i2);
1113
1114 if (equiv1 && !equiv2)
1115 remove_note (i1, equiv1);
1116 else if (!equiv1 && equiv2)
1117 remove_note (i2, equiv2);
1118 else if (equiv1 && equiv2
1119 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1120 {
1121 remove_note (i1, equiv1);
1122 remove_note (i2, equiv2);
1123 }
1124
1125 afterlast1 = last1, afterlast2 = last2;
1126 last1 = i1, last2 = i2;
1127 ninsns++;
1128 }
1129
1130 i1 = PREV_INSN (i1);
1131 i2 = PREV_INSN (i2);
1132 }
1133
1134 #ifdef HAVE_cc0
1135 /* Don't allow the insn after a compare to be shared by
1136 cross-jumping unless the compare is also shared. */
1137 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1138 last1 = afterlast1, last2 = afterlast2, ninsns--;
1139 #endif
1140
1141 /* Include preceding notes and labels in the cross-jump. One,
1142 this may bring us to the head of the blocks as requested above.
1143 Two, it keeps line number notes as matched as possible. */
1144 if (ninsns)
1145 {
1146 while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
1147 last1 = PREV_INSN (last1);
1148
1149 if (last1 != BB_HEAD (bb1) && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
1150 last1 = PREV_INSN (last1);
1151
1152 while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
1153 last2 = PREV_INSN (last2);
1154
1155 if (last2 != BB_HEAD (bb2) && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
1156 last2 = PREV_INSN (last2);
1157
1158 *f1 = last1;
1159 *f2 = last2;
1160 }
1161
1162 return ninsns;
1163 }
1164
1165 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1166 the branch instruction. This means that if we commonize the control
1167 flow before the end of the basic block, the semantics remain unchanged.
1168
1169 We may assume that there exists one edge with a common destination. */
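
/* For instance (illustrative): blocks ending in

	if (x == y) goto A; else fall through to B;
	if (x != y) goto B; else fall through to A;

   match with REVERSE set, since exchanging the branch and fallthru
   destinations of the second block makes the conditions equivalent.  */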
1170
1171 static bool
1172 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1173 {
1174 int nehedges1 = 0, nehedges2 = 0;
1175 edge fallthru1 = 0, fallthru2 = 0;
1176 edge e1, e2;
1177
1178 /* If BB1 has only one successor, we may be looking at either an
1179 unconditional jump, or a fake edge to exit. */
1180 if (bb1->succ && !bb1->succ->succ_next
1181 && (bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1182 && (GET_CODE (BB_END (bb1)) != JUMP_INSN || simplejump_p (BB_END (bb1))))
1183 return (bb2->succ && !bb2->succ->succ_next
1184 && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1185 && (GET_CODE (BB_END (bb2)) != JUMP_INSN || simplejump_p (BB_END (bb2))));
1186
1187 /* Match conditional jumps - this may get tricky when fallthru and branch
1188 edges are crossed. */
1189 if (bb1->succ
1190 && bb1->succ->succ_next
1191 && !bb1->succ->succ_next->succ_next
1192 && any_condjump_p (BB_END (bb1))
1193 && onlyjump_p (BB_END (bb1)))
1194 {
1195 edge b1, f1, b2, f2;
1196 bool reverse, match;
1197 rtx set1, set2, cond1, cond2;
1198 enum rtx_code code1, code2;
1199
1200 if (!bb2->succ
1201 || !bb2->succ->succ_next
1202 || bb2->succ->succ_next->succ_next
1203 || !any_condjump_p (BB_END (bb2))
1204 || !onlyjump_p (BB_END (bb2)))
1205 return false;
1206
1207 b1 = BRANCH_EDGE (bb1);
1208 b2 = BRANCH_EDGE (bb2);
1209 f1 = FALLTHRU_EDGE (bb1);
1210 f2 = FALLTHRU_EDGE (bb2);
1211
1212 /* Get around possible forwarders on fallthru edges. Other cases
1213 should be optimized out already. */
1214 if (FORWARDER_BLOCK_P (f1->dest))
1215 f1 = f1->dest->succ;
1216
1217 if (FORWARDER_BLOCK_P (f2->dest))
1218 f2 = f2->dest->succ;
1219
1220 /* To simplify use of this function, return false if there are
1221 unneeded forwarder blocks. These will get eliminated later
1222 during cleanup_cfg. */
1223 if (FORWARDER_BLOCK_P (f1->dest)
1224 || FORWARDER_BLOCK_P (f2->dest)
1225 || FORWARDER_BLOCK_P (b1->dest)
1226 || FORWARDER_BLOCK_P (b2->dest))
1227 return false;
1228
1229 if (f1->dest == f2->dest && b1->dest == b2->dest)
1230 reverse = false;
1231 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1232 reverse = true;
1233 else
1234 return false;
1235
1236 set1 = pc_set (BB_END (bb1));
1237 set2 = pc_set (BB_END (bb2));
1238 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1239 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1240 reverse = !reverse;
1241
1242 cond1 = XEXP (SET_SRC (set1), 0);
1243 cond2 = XEXP (SET_SRC (set2), 0);
1244 code1 = GET_CODE (cond1);
1245 if (reverse)
1246 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1247 else
1248 code2 = GET_CODE (cond2);
1249
1250 if (code2 == UNKNOWN)
1251 return false;
1252
1253 /* Verify codes and operands match. */
1254 match = ((code1 == code2
1255 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1256 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1257 || (code1 == swap_condition (code2)
1258 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1259 XEXP (cond2, 0))
1260 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1261 XEXP (cond2, 1))));
1262
1263 /* If we return true, we will join the blocks. Which means that
1264 we will only have one branch prediction bit to work with. Thus
1265 we require the existing branches to have probabilities that are
1266 roughly similar. */
1267 if (match
1268 && !optimize_size
1269 && maybe_hot_bb_p (bb1)
1270 && maybe_hot_bb_p (bb2))
1271 {
1272 int prob2;
1273
1274 if (b1->dest == b2->dest)
1275 prob2 = b2->probability;
1276 else
1277 /* Do not use f2 probability as f2 may be forwarded. */
1278 prob2 = REG_BR_PROB_BASE - b2->probability;
1279
1280 /* Fail if the difference in probabilities is greater than 50%.
1281 This rules out two well-predicted branches with opposite
1282 outcomes. */
1283 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1284 {
1285 if (rtl_dump_file)
1286 fprintf (rtl_dump_file,
1287 "Outcomes of branch in bb %i and %i differs to much (%i %i)\n",
1288 bb1->index, bb2->index, b1->probability, prob2);
1289
1290 return false;
1291 }
1292 }
1293
1294 if (rtl_dump_file && match)
1295 fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n",
1296 bb1->index, bb2->index);
1297
1298 return match;
1299 }
1300
1301 /* Generic case - we are seeing a computed jump, table jump or trapping
1302 instruction. */
1303
1304 #ifndef CASE_DROPS_THROUGH
1305 /* Check whether there are tablejumps at the end of BB1 and BB2.
1306 Return true if they are identical. */
1307 {
1308 rtx label1, label2;
1309 rtx table1, table2;
1310
1311 if (tablejump_p (BB_END (bb1), &label1, &table1)
1312 && tablejump_p (BB_END (bb2), &label2, &table2)
1313 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1314 {
1315 /* The labels should never be the same rtx. If they really are the same,
1316 the jump tables are the same too. So disable crossjumping of blocks BB1
1317 and BB2 because when deleting the common insns at the end of BB1
1318 by delete_block () the jump table would be deleted too. */
1319 /* If LABEL2 is referenced in BB1->END, do not do anything
1320 because we would lose information when replacing
1321 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */
1322 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1323 {
1324 /* Set IDENTICAL to true when the tables are identical. */
1325 bool identical = false;
1326 rtx p1, p2;
1327
1328 p1 = PATTERN (table1);
1329 p2 = PATTERN (table2);
1330 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1331 {
1332 identical = true;
1333 }
1334 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1335 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1336 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1337 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1338 {
1339 int i;
1340
1341 identical = true;
1342 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1343 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1344 identical = false;
1345 }
1346
1347 if (identical)
1348 {
1349 replace_label_data rr;
1350 bool match;
1351
1352 /* Temporarily replace references to LABEL1 with LABEL2
1353 in BB1->END so that we can compare the instructions. */
1354 rr.r1 = label1;
1355 rr.r2 = label2;
1356 rr.update_label_nuses = false;
1357 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1358
1359 match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
1360 if (rtl_dump_file && match)
1361 fprintf (rtl_dump_file,
1362 "Tablejumps in bb %i and %i match.\n",
1363 bb1->index, bb2->index);
1364
1365 /* Set the original label in BB1->END because when deleting
1366 a block whose end is a tablejump, the tablejump referenced
1367 from the instruction is deleted too. */
1368 rr.r1 = label2;
1369 rr.r2 = label1;
1370 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1371
1372 return match;
1373 }
1374 }
1375 return false;
1376 }
1377 }
1378 #endif
1379
1380 /* First ensure that the instructions match. There may be many outgoing
1381 edges so this test is generally cheaper. */
1382 if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
1383 return false;
1384
1385 /* Search the outgoing edges, ensure that the counts do match, and find
1386 possible fallthru and exception handling edges, since these need more
1387 validation. */
1388 for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
1389 e1 = e1->succ_next, e2 = e2->succ_next)
1390 {
1391 if (e1->flags & EDGE_EH)
1392 nehedges1++;
1393
1394 if (e2->flags & EDGE_EH)
1395 nehedges2++;
1396
1397 if (e1->flags & EDGE_FALLTHRU)
1398 fallthru1 = e1;
1399 if (e2->flags & EDGE_FALLTHRU)
1400 fallthru2 = e2;
1401 }
1402
1403 /* If the number of edges of each type does not match, fail. */
1404 if (e1 || e2
1405 || nehedges1 != nehedges2
1406 || (fallthru1 != 0) != (fallthru2 != 0))
1407 return false;
1408
1409 /* fallthru edges must be forwarded to the same destination. */
1410 if (fallthru1)
1411 {
1412 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1413 ? fallthru1->dest->succ->dest: fallthru1->dest);
1414 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1415 ? fallthru2->dest->succ->dest: fallthru2->dest);
1416
1417 if (d1 != d2)
1418 return false;
1419 }
1420
1421 /* Ensure the same EH region. */
1422 {
1423 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1424 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1425
1426 if (!n1 && n2)
1427 return false;
1428
1429 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1430 return false;
1431 }
1432
1433 /* We don't need to match the rest of the edges, as the above checks should
1434 be enough to ensure that they are equivalent. */
1435 return true;
1436 }
1437
1438 /* E1 and E2 are edges with the same destination block. Search their
1439 predecessors for common code. If found, redirect control flow from
1440 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
1441
1442 static bool
1443 try_crossjump_to_edge (int mode, edge e1, edge e2)
1444 {
1445 int nmatch;
1446 basic_block src1 = e1->src, src2 = e2->src;
1447 basic_block redirect_to, redirect_from, to_remove;
1448 rtx newpos1, newpos2;
1449 edge s;
1450
1451 /* Search backward through forwarder blocks. We don't need to worry
1452 about multiple entry or chained forwarders, as they will be optimized
1453 away. We do this to look past the unconditional jump following a
1454 conditional jump that is required due to the current CFG shape. */
1455 if (src1->pred
1456 && !src1->pred->pred_next
1457 && FORWARDER_BLOCK_P (src1))
1458 e1 = src1->pred, src1 = e1->src;
1459
1460 if (src2->pred
1461 && !src2->pred->pred_next
1462 && FORWARDER_BLOCK_P (src2))
1463 e2 = src2->pred, src2 = e2->src;
1464
1465 /* Nothing to do if we reach ENTRY, or a common source block. */
1466 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1467 return false;
1468 if (src1 == src2)
1469 return false;
1470
1471 /* Seeing more than one forwarder block would confuse us later... */
1472 if (FORWARDER_BLOCK_P (e1->dest)
1473 && FORWARDER_BLOCK_P (e1->dest->succ->dest))
1474 return false;
1475
1476 if (FORWARDER_BLOCK_P (e2->dest)
1477 && FORWARDER_BLOCK_P (e2->dest->succ->dest))
1478 return false;
1479
1480 /* Likewise with dead code (possibly newly created by the other optimizations
1481 of cfg_cleanup). */
1482 if (!src1->pred || !src2->pred)
1483 return false;
1484
1485 /* Look for the common insn sequence, part the first ... */
1486 if (!outgoing_edges_match (mode, src1, src2))
1487 return false;
1488
1489 /* ... and part the second. */
1490 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1491 if (!nmatch)
1492 return false;
1493
1494 #ifndef CASE_DROPS_THROUGH
1495 /* Here we know that the insns at the end of SRC1 which are common with SRC2
1496 will be deleted.
1497 If we have tablejumps at the end of SRC1 and SRC2,
1498 they have already been compared for equivalence in outgoing_edges_match (),
1499 so replace the references to TABLE1 by references to TABLE2. */
1500 {
1501 rtx label1, label2;
1502 rtx table1, table2;
1503
1504 if (tablejump_p (BB_END (src1), &label1, &table1)
1505 && tablejump_p (BB_END (src2), &label2, &table2)
1506 && label1 != label2)
1507 {
1508 replace_label_data rr;
1509 rtx insn;
1510
1511 /* Replace references to LABEL1 with LABEL2. */
1512 rr.r1 = label1;
1513 rr.r2 = label2;
1514 rr.update_label_nuses = true;
1515 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1516 {
1517 /* Do not replace the label in SRC1->END because when deleting
1518 a block whose end is a tablejump, the tablejump referenced
1519 from the instruction is deleted too. */
1520 if (insn != BB_END (src1))
1521 for_each_rtx (&insn, replace_label, &rr);
1522 }
1523 }
1524 }
1525 #endif
1526
1527 /* Avoid splitting if possible. */
1528 if (newpos2 == BB_HEAD (src2))
1529 redirect_to = src2;
1530 else
1531 {
1532 if (rtl_dump_file)
1533 fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n",
1534 src2->index, nmatch);
1535 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1536 }
1537
1538 if (rtl_dump_file)
1539 fprintf (rtl_dump_file,
1540 "Cross jumping from bb %i to bb %i; %i common insns\n",
1541 src1->index, src2->index, nmatch);
1542
1543 redirect_to->count += src1->count;
1544 redirect_to->frequency += src1->frequency;
1545 /* We may have some registers visible through the block. */
1546 redirect_to->flags |= BB_DIRTY;
1547
1548 /* Recompute the frequencies and counts of outgoing edges. */
1549 for (s = redirect_to->succ; s; s = s->succ_next)
1550 {
1551 edge s2;
1552 basic_block d = s->dest;
1553
1554 if (FORWARDER_BLOCK_P (d))
1555 d = d->succ->dest;
1556
1557 for (s2 = src1->succ; ; s2 = s2->succ_next)
1558 {
1559 basic_block d2 = s2->dest;
1560 if (FORWARDER_BLOCK_P (d2))
1561 d2 = d2->succ->dest;
1562 if (d == d2)
1563 break;
1564 }
1565
1566 s->count += s2->count;
1567
1568 /* Take care to update possible forwarder blocks. We verified
1569 that there is no more than one in the chain, so we can't run
1570 into an infinite loop. */
1571 if (FORWARDER_BLOCK_P (s->dest))
1572 {
1573 s->dest->succ->count += s2->count;
1574 s->dest->count += s2->count;
1575 s->dest->frequency += EDGE_FREQUENCY (s);
1576 }
1577
1578 if (FORWARDER_BLOCK_P (s2->dest))
1579 {
1580 s2->dest->succ->count -= s2->count;
1581 if (s2->dest->succ->count < 0)
1582 s2->dest->succ->count = 0;
1583 s2->dest->count -= s2->count;
1584 s2->dest->frequency -= EDGE_FREQUENCY (s);
1585 if (s2->dest->frequency < 0)
1586 s2->dest->frequency = 0;
1587 if (s2->dest->count < 0)
1588 s2->dest->count = 0;
1589 }
1590
1591 if (!redirect_to->frequency && !src1->frequency)
1592 s->probability = (s->probability + s2->probability) / 2;
1593 else
1594 s->probability
1595 = ((s->probability * redirect_to->frequency +
1596 s2->probability * src1->frequency)
1597 / (redirect_to->frequency + src1->frequency));
1598 }
1599
1600 update_br_prob_note (redirect_to);
1601
1602 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1603
1604 /* Skip possible basic block header. */
1605 if (GET_CODE (newpos1) == CODE_LABEL)
1606 newpos1 = NEXT_INSN (newpos1);
1607
1608 if (GET_CODE (newpos1) == NOTE)
1609 newpos1 = NEXT_INSN (newpos1);
1610
1611 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
1612 to_remove = redirect_from->succ->dest;
1613
1614 redirect_edge_and_branch_force (redirect_from->succ, redirect_to);
1615 delete_block (to_remove);
1616
1617 update_forwarder_flag (redirect_from);
1618
1619 return true;
1620 }
1621
1622 /* Search the predecessors of BB for common insn sequences. When found,
1623 share code between them by redirecting control flow. Return true if
1624 any changes were made. */
1625
1626 static bool
1627 try_crossjump_bb (int mode, basic_block bb)
1628 {
1629 edge e, e2, nexte2, nexte, fallthru;
1630 bool changed;
1631 int n = 0, max;
1632
1633 /* Nothing to do if there are not at least two incoming edges. */
1634 if (!bb->pred || !bb->pred->pred_next)
1635 return false;
1636
1637 /* It is always cheapest to redirect a block that ends in a branch to
1638 a block that falls through into BB, as that adds no branches to the
1639 program. We'll try that combination first. */
1640 fallthru = NULL;
1641 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1642 for (e = bb->pred; e ; e = e->pred_next, n++)
1643 {
1644 if (e->flags & EDGE_FALLTHRU)
1645 fallthru = e;
1646 if (n > max)
1647 return false;
1648 }
1649
1650 changed = false;
1651 for (e = bb->pred; e; e = nexte)
1652 {
1653 nexte = e->pred_next;
1654
1655 /* As noted above, first try with the fallthru predecessor. */
1656 if (fallthru)
1657 {
1658 /* Don't combine the fallthru edge into anything else.
1659 If there is a match, we'll do it the other way around. */
1660 if (e == fallthru)
1661 continue;
1662
1663 if (try_crossjump_to_edge (mode, e, fallthru))
1664 {
1665 changed = true;
1666 nexte = bb->pred;
1667 continue;
1668 }
1669 }
1670
1671 /* Non-obvious work limiting check: Recognize that we're going
1672 to call try_crossjump_bb on every basic block. So if we have
1673 two blocks with lots of outgoing edges (a switch) and they
1674 share lots of common destinations, then we would do the
1675 cross-jump check once for each common destination.
1676
1677 Now, if the blocks actually are cross-jump candidates, then
1678 all of their destinations will be shared. Which means that
1679 we only need to check them for cross-jump candidacy once. We
1680 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1681 choosing to do the check from the block for which the edge
1682 in question is the first successor of A. */
1683 if (e->src->succ != e)
1684 continue;
1685
1686 for (e2 = bb->pred; e2; e2 = nexte2)
1687 {
1688 nexte2 = e2->pred_next;
1689
1690 if (e2 == e)
1691 continue;
1692
1693 /* We've already checked the fallthru edge above. */
1694 if (e2 == fallthru)
1695 continue;
1696
1697 /* The "first successor" check above only prevents multiple
1698 checks of crossjump(A,B). In order to prevent redundant
1699 checks of crossjump(B,A), require that A be the block
1700 with the lowest index. */
1701 if (e->src->index > e2->src->index)
1702 continue;
1703
1704 if (try_crossjump_to_edge (mode, e, e2))
1705 {
1706 changed = true;
1707 nexte = bb->pred;
1708 break;
1709 }
1710 }
1711 }
1712
1713 return changed;
1714 }
1715
1716 /* Do simple CFG optimizations - basic block merging, simplification of jump
1717 instructions, etc. Return nonzero if changes were made. */
1718
1719 static bool
1720 try_optimize_cfg (int mode)
1721 {
1722 bool changed_overall = false;
1723 bool changed;
1724 int iterations = 0;
1725 basic_block bb, b, next;
1726
1727 if (mode & CLEANUP_CROSSJUMP)
1728 add_noreturn_fake_exit_edges ();
1729
1730 FOR_EACH_BB (bb)
1731 update_forwarder_flag (bb);
1732
1733 if (mode & CLEANUP_UPDATE_LIFE)
1734 clear_bb_flags ();
1735
1736 if (! (* targetm.cannot_modify_jumps_p) ())
1737 {
1738 /* Attempt to merge blocks as made possible by edge removal. If
1739 a block has only one successor, and the successor has only
1740 one predecessor, they may be combined. */
1741 do
1742 {
1743 changed = false;
1744 iterations++;
1745
1746 if (rtl_dump_file)
1747 fprintf (rtl_dump_file,
1748 "\n\ntry_optimize_cfg iteration %i\n\n",
1749 iterations);
1750
1751 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1752 {
1753 basic_block c;
1754 edge s;
1755 bool changed_here = false;
1756
1757 /* Delete trivially dead basic blocks. */
1758 while (b->pred == NULL)
1759 {
1760 c = b->prev_bb;
1761 if (rtl_dump_file)
1762 fprintf (rtl_dump_file, "Deleting block %i.\n",
1763 b->index);
1764
1765 delete_block (b);
1766 if (!(mode & CLEANUP_CFGLAYOUT))
1767 changed = true;
1768 b = c;
1769 }
1770
1771 /* Remove code labels no longer used. Don't do this
1772 before CALL_PLACEHOLDER is removed, as some branches
1773 may be hidden within. */
1774 if (b->pred->pred_next == NULL
1775 && (b->pred->flags & EDGE_FALLTHRU)
1776 && !(b->pred->flags & EDGE_COMPLEX)
1777 && GET_CODE (BB_HEAD (b)) == CODE_LABEL
1778 && (!(mode & CLEANUP_PRE_SIBCALL)
1779 || !tail_recursion_label_p (BB_HEAD (b)))
1780 /* If the previous block ends with a branch to this
1781 block, we can't delete the label. Normally this
1782 is a condjump that is yet to be simplified, but
1783 if CASE_DROPS_THRU, this can be a tablejump with
1784 some element going to the same place as the
1785 default (fallthru). */
1786 && (b->pred->src == ENTRY_BLOCK_PTR
1787 || GET_CODE (BB_END (b->pred->src)) != JUMP_INSN
1788 || ! label_is_jump_target_p (BB_HEAD (b),
1789 BB_END (b->pred->src))))
1790 {
1791 rtx label = BB_HEAD (b);
1792
1793 delete_insn_chain (label, label);
1794 /* In case the label is undeletable, move it after the
1795 BASIC_BLOCK note. */
1796 if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
1797 {
1798 rtx bb_note = NEXT_INSN (BB_HEAD (b));
1799
1800 reorder_insns_nobb (label, label, bb_note);
1801 BB_HEAD (b) = bb_note;
1802 }
1803 if (rtl_dump_file)
1804 fprintf (rtl_dump_file, "Deleted label in block %i.\n",
1805 b->index);
1806 }
1807
1808 /* If we fall through an empty block, we can remove it. */
1809 if (!(mode & CLEANUP_CFGLAYOUT)
1810 && b->pred->pred_next == NULL
1811 && (b->pred->flags & EDGE_FALLTHRU)
1812 && GET_CODE (BB_HEAD (b)) != CODE_LABEL
1813 && FORWARDER_BLOCK_P (b)
1814 /* Note that forwarder_block_p true ensures that
1815 there is a successor for this block. */
1816 && (b->succ->flags & EDGE_FALLTHRU)
1817 && n_basic_blocks > 1)
1818 {
1819 if (rtl_dump_file)
1820 fprintf (rtl_dump_file,
1821 "Deleting fallthru block %i.\n",
1822 b->index);
1823
1824 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
1825 redirect_edge_succ_nodup (b->pred, b->succ->dest);
1826 delete_block (b);
1827 changed = true;
1828 b = c;
1829 }
1830
1831 if ((s = b->succ) != NULL
1832 && s->succ_next == NULL
1833 && !(s->flags & EDGE_COMPLEX)
1834 && (c = s->dest) != EXIT_BLOCK_PTR
1835 && c->pred->pred_next == NULL
1836 && b != c)
1837 {
1838 /* When not in cfg_layout mode use code aware of reordering
1839 INSNs. This code possibly creates new basic blocks, so it
1840 does not fit the merge_blocks interface and is kept here in
1841 the hope that it will become useless once more of the compiler
1842 is transformed to use cfg_layout mode. */
1843
1844 if ((mode & CLEANUP_CFGLAYOUT)
1845 && can_merge_blocks_p (b, c))
1846 {
1847 merge_blocks (b, c);
1848 update_forwarder_flag (b);
1849 changed_here = true;
1850 }
1851 else if (!(mode & CLEANUP_CFGLAYOUT)
1852 /* If the jump insn has side effects,
1853 we can't kill the edge. */
1854 && (GET_CODE (BB_END (b)) != JUMP_INSN
1855 || (reload_completed
1856 ? simplejump_p (BB_END (b))
1857 : onlyjump_p (BB_END (b))))
1858 && (next = merge_blocks_move (s, b, c, mode)))
1859 {
1860 b = next;
1861 changed_here = true;
1862 }
1863 }
1864
1865 /* Simplify branch over branch. */
1866 if ((mode & CLEANUP_EXPENSIVE)
1867 && !(mode & CLEANUP_CFGLAYOUT)
1868 && try_simplify_condjump (b))
1869 changed_here = true;
1870
1871 /* If B has a single outgoing edge, but uses a
1872 non-trivial jump instruction without side-effects, we
1873 can either delete the jump entirely, or replace it
1874 with a simple unconditional jump. */
1875 if (b->succ
1876 && ! b->succ->succ_next
1877 && b->succ->dest != EXIT_BLOCK_PTR
1878 && onlyjump_p (BB_END (b))
1879 && try_redirect_by_replacing_jump (b->succ, b->succ->dest,
1880 (mode & CLEANUP_CFGLAYOUT) != 0))
1881 {
1882 update_forwarder_flag (b);
1883 changed_here = true;
1884 }
1885
1886 /* Simplify branch to branch. */
1887 if (try_forward_edges (mode, b))
1888 changed_here = true;
1889
1890 /* Look for shared code between blocks. */
1891 if ((mode & CLEANUP_CROSSJUMP)
1892 && try_crossjump_bb (mode, b))
1893 changed_here = true;
1894
1895 /* Don't get confused by the index shift caused by
1896 deleting blocks. */
1897 if (!changed_here)
1898 b = b->next_bb;
1899 else
1900 changed = true;
1901 }
1902
1903 if ((mode & CLEANUP_CROSSJUMP)
1904 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
1905 changed = true;
1906
1907 #ifdef ENABLE_CHECKING
1908 if (changed)
1909 verify_flow_info ();
1910 #endif
1911
1912 changed_overall |= changed;
1913 }
1914 while (changed);
1915 }
1916
1917 if (mode & CLEANUP_CROSSJUMP)
1918 remove_fake_edges ();
1919
1920 clear_aux_for_blocks ();
1921
1922 return changed_overall;
1923 }
1924
1925 /* Delete all unreachable basic blocks. */
1926
1927 bool
1928 delete_unreachable_blocks (void)
1929 {
1930 bool changed = false;
1931 basic_block b, next_bb;
1932
1933 find_unreachable_blocks ();
1934
1935 /* Delete all unreachable basic blocks. */
1936
1937 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
1938 {
1939 next_bb = b->next_bb;
1940
1941 if (!(b->flags & BB_REACHABLE))
1942 {
1943 delete_block (b);
1944 changed = true;
1945 }
1946 }
1947
1948 if (changed)
1949 tidy_fallthru_edges ();
1950 return changed;
1951 }
1952
1953 /* Tidy the CFG by deleting unreachable code and whatnot. */
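
/* A typical invocation (illustrative; the exact flag combination is up
   to the calling pass):

	cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP);

   enables the costly block-moving transformations and cross jumping on
   top of the unconditionally performed edge forwarding, unreachable
   block removal and block merging.  */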
1954
1955 bool
1956 cleanup_cfg (int mode)
1957 {
1958 bool changed = false;
1959
1960 timevar_push (TV_CLEANUP_CFG);
1961 if (delete_unreachable_blocks ())
1962 {
1963 changed = true;
1964 /* We've possibly created trivially dead code. Clean it up right
1965 now to introduce more opportunities for try_optimize_cfg. */
1966 if (!(mode & (CLEANUP_NO_INSN_DEL
1967 | CLEANUP_UPDATE_LIFE | CLEANUP_PRE_SIBCALL))
1968 && !reload_completed)
1969 delete_trivially_dead_insns (get_insns(), max_reg_num ());
1970 }
1971
1972 compact_blocks ();
1973
1974 while (try_optimize_cfg (mode))
1975 {
1976 delete_unreachable_blocks (), changed = true;
1977 if (mode & CLEANUP_UPDATE_LIFE)
1978 {
1979 /* Cleaning up the CFG introduces more opportunities for dead code
1980 removal that in turn may introduce more opportunities for
1981 cleaning up the CFG. */
1982 if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
1983 PROP_DEATH_NOTES
1984 | PROP_SCAN_DEAD_CODE
1985 | PROP_KILL_DEAD_CODE
1986 | ((mode & CLEANUP_LOG_LINKS)
1987 ? PROP_LOG_LINKS : 0)))
1988 break;
1989 }
1990 else if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_PRE_SIBCALL))
1991 && (mode & CLEANUP_EXPENSIVE)
1992 && !reload_completed)
1993 {
1994 if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
1995 break;
1996 }
1997 else
1998 break;
1999 delete_dead_jumptables ();
2000 }
2001
2002 /* Kill the data we won't maintain. */
2003 free_EXPR_LIST_list (&label_value_list);
2004 timevar_pop (TV_CLEANUP_CFG);
2005
2006 return changed;
2007 }
2008