xref: /dragonfly/contrib/gcc-4.7/gcc/cfgcleanup.c (revision 0dace59e)
1 /* Control flow optimization code for GNU compiler.
2    Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3    1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011
4    Free Software Foundation, Inc.
5 
6 This file is part of GCC.
7 
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12 
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16 for more details.
17 
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3.  If not see
20 <http://www.gnu.org/licenses/>.  */
21 
22 /* This file contains the control flow optimizer.  The main entry point is
23    cleanup_cfg.  The following optimizations are performed:
24 
25    - Unreachable blocks removal
26    - Edge forwarding (an edge to a forwarder block is forwarded to its
27      successor.  Simplification of the branch instruction is performed by
28      the underlying infrastructure so the branch can be converted to a
29      simplejump or eliminated).
30    - Cross jumping (tail merging)
31    - Conditional jump-around-simplejump simplification
32    - Basic block merging.  */
33 
34 #include "config.h"
35 #include "system.h"
36 #include "coretypes.h"
37 #include "tm.h"
38 #include "rtl.h"
39 #include "hard-reg-set.h"
40 #include "regs.h"
41 #include "timevar.h"
42 #include "output.h"
43 #include "insn-config.h"
44 #include "flags.h"
45 #include "recog.h"
46 #include "diagnostic-core.h"
47 #include "cselib.h"
48 #include "params.h"
49 #include "tm_p.h"
50 #include "target.h"
51 #include "cfglayout.h"
52 #include "emit-rtl.h"
53 #include "tree-pass.h"
54 #include "cfgloop.h"
55 #include "expr.h"
56 #include "df.h"
57 #include "dce.h"
58 #include "dbgcnt.h"
59 
60 #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
61 
62 /* Set to true when we are running first pass of try_optimize_cfg loop.  */
63 static bool first_pass;
64 
65 /* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
66 static bool crossjumps_occured;
67 
68 /* Set to true if we couldn't run an optimization due to stale liveness
69    information; we should run df_analyze to enable more opportunities.  */
70 static bool block_was_dirty;
71 
72 static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
73 static bool try_crossjump_bb (int, basic_block);
74 static bool outgoing_edges_match (int, basic_block, basic_block);
75 static enum replace_direction old_insns_match_p (int, rtx, rtx);
76 
77 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
78 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
79 static bool try_optimize_cfg (int);
80 static bool try_simplify_condjump (basic_block);
81 static bool try_forward_edges (int, basic_block);
82 static edge thread_jump (edge, basic_block);
83 static bool mark_effect (rtx, bitmap);
84 static void notice_new_block (basic_block);
85 static void update_forwarder_flag (basic_block);
86 static int mentions_nonequal_regs (rtx *, void *);
87 static void merge_memattrs (rtx, rtx);
88 
89 /* Set flags for newly created block.  */
90 
91 static void
92 notice_new_block (basic_block bb)
93 {
94   if (!bb)
95     return;
96 
97   if (forwarder_block_p (bb))
98     bb->flags |= BB_FORWARDER_BLOCK;
99 }
100 
101 /* Recompute forwarder flag after block has been modified.  */
102 
103 static void
104 update_forwarder_flag (basic_block bb)
105 {
106   if (forwarder_block_p (bb))
107     bb->flags |= BB_FORWARDER_BLOCK;
108   else
109     bb->flags &= ~BB_FORWARDER_BLOCK;
110 }
111 
112 /* Simplify a conditional jump around an unconditional jump.
113    Return true if something changed.  */
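/* For example, assuming the block containing "goto L1" below is a forwarder
   block with the conditional branch block as its only predecessor,

	if (cond) goto L2;
	goto L1;
     L2:
	...

   is simplified by inverting the condition into

	if (!cond) goto L1;
     L2:
	...  */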
114 
115 static bool
116 try_simplify_condjump (basic_block cbranch_block)
117 {
118   basic_block jump_block, jump_dest_block, cbranch_dest_block;
119   edge cbranch_jump_edge, cbranch_fallthru_edge;
120   rtx cbranch_insn;
121 
122   /* Verify that there are exactly two successors.  */
123   if (EDGE_COUNT (cbranch_block->succs) != 2)
124     return false;
125 
126   /* Verify that we've got a normal conditional branch at the end
127      of the block.  */
128   cbranch_insn = BB_END (cbranch_block);
129   if (!any_condjump_p (cbranch_insn))
130     return false;
131 
132   cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
133   cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
134 
135   /* The next block must not have multiple predecessors, must not
136      be the last block in the function, and must contain just the
137      unconditional jump.  */
138   jump_block = cbranch_fallthru_edge->dest;
139   if (!single_pred_p (jump_block)
140       || jump_block->next_bb == EXIT_BLOCK_PTR
141       || !FORWARDER_BLOCK_P (jump_block))
142     return false;
143   jump_dest_block = single_succ (jump_block);
144 
145   /* If we are partitioning hot/cold basic blocks, we don't want to
146      mess up unconditional or indirect jumps that cross between hot
147      and cold sections.
148 
149      Basic block partitioning may result in some jumps that appear to
150      be optimizable (or blocks that appear to be mergeable), but which really
151      must be left untouched (they are required to make it safely across
152      partition boundaries).  See the comments at the top of
153      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
154 
155   if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
156       || (cbranch_jump_edge->flags & EDGE_CROSSING))
157     return false;
158 
159   /* The conditional branch must target the block after the
160      unconditional branch.  */
161   cbranch_dest_block = cbranch_jump_edge->dest;
162 
163   if (cbranch_dest_block == EXIT_BLOCK_PTR
164       || !can_fallthru (jump_block, cbranch_dest_block))
165     return false;
166 
167   /* Invert the conditional branch.  */
168   if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
169     return false;
170 
171   if (dump_file)
172     fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
173 	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
174 
175   /* Success.  Update the CFG to match.  Note that after this point
176      the edge variable names appear backwards; the redirection is done
177      this way to preserve edge profile data.  */
178   cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
179 						cbranch_dest_block);
180   cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
181 						    jump_dest_block);
182   cbranch_jump_edge->flags |= EDGE_FALLTHRU;
183   cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
184   update_br_prob_note (cbranch_block);
185 
186   /* Delete the block with the unconditional jump, and clean up the mess.  */
187   delete_basic_block (jump_block);
188   tidy_fallthru_edge (cbranch_jump_edge);
189   update_forwarder_flag (cbranch_block);
190 
191   return true;
192 }
193 
194 /* Attempt to prove that the operation is a NOOP using CSElib, or mark its
195    effect on the register.  Used by jump threading.  */
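/* For example, a SET whose source cselib proves equal to its destination
   leaves NONEQUAL untouched; any other SET of a register marks that register
   (or the full hard register range) in NONEQUAL; a SET whose destination is
   not a register, e.g. a memory store, makes the function fail by returning
   true.  */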
196 
197 static bool
198 mark_effect (rtx exp, regset nonequal)
199 {
200   int regno;
201   rtx dest;
202   switch (GET_CODE (exp))
203     {
204       /* In case we do clobber the register, mark it as equal, as we know the
205 	 value is dead so it doesn't have to match.  */
206     case CLOBBER:
207       if (REG_P (XEXP (exp, 0)))
208 	{
209 	  dest = XEXP (exp, 0);
210 	  regno = REGNO (dest);
211 	  if (HARD_REGISTER_NUM_P (regno))
212 	    bitmap_clear_range (nonequal, regno,
213 				hard_regno_nregs[regno][GET_MODE (dest)]);
214 	  else
215 	    bitmap_clear_bit (nonequal, regno);
216 	}
217       return false;
218 
219     case SET:
220       if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
221 	return false;
222       dest = SET_DEST (exp);
223       if (dest == pc_rtx)
224 	return false;
225       if (!REG_P (dest))
226 	return true;
227       regno = REGNO (dest);
228       if (HARD_REGISTER_NUM_P (regno))
229 	bitmap_set_range (nonequal, regno,
230 			  hard_regno_nregs[regno][GET_MODE (dest)]);
231       else
232 	bitmap_set_bit (nonequal, regno);
233       return false;
234 
235     default:
236       return false;
237     }
238 }
239 
240 /* Return nonzero if X is a register contained in the regset DATA.
241    Called via for_each_rtx.  */
242 static int
243 mentions_nonequal_regs (rtx *x, void *data)
244 {
245   regset nonequal = (regset) data;
246   if (REG_P (*x))
247     {
248       int regno;
249 
250       regno = REGNO (*x);
251       if (REGNO_REG_SET_P (nonequal, regno))
252 	return 1;
253       if (regno < FIRST_PSEUDO_REGISTER)
254 	{
255 	  int n = hard_regno_nregs[regno][GET_MODE (*x)];
256 	  while (--n > 0)
257 	    if (REGNO_REG_SET_P (nonequal, regno + n))
258 	      return 1;
259 	}
260     }
261   return 0;
262 }
263 /* Attempt to prove that the basic block B will have no side effects and
264    will always continue along the same edge if reached via E.  Return that
265    edge if it exists, NULL otherwise.  */
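/* For example, if E->src ends with "if (a > 0)" and E is the taken edge,
   and B has no side effects and ends with "if (a >= 0)", then the first
   comparison dominates the second, so B's own taken edge is returned and
   the caller may redirect E past B to that edge's destination.  */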
266 
267 static edge
268 thread_jump (edge e, basic_block b)
269 {
270   rtx set1, set2, cond1, cond2, insn;
271   enum rtx_code code1, code2, reversed_code2;
272   bool reverse1 = false;
273   unsigned i;
274   regset nonequal;
275   bool failed = false;
276   reg_set_iterator rsi;
277 
278   if (b->flags & BB_NONTHREADABLE_BLOCK)
279     return NULL;
280 
281   /* At the moment we handle only conditional jumps, but later we may
282      want to extend this code to tablejumps and others.  */
283   if (EDGE_COUNT (e->src->succs) != 2)
284     return NULL;
285   if (EDGE_COUNT (b->succs) != 2)
286     {
287       b->flags |= BB_NONTHREADABLE_BLOCK;
288       return NULL;
289     }
290 
291   /* Second branch must end with onlyjump, as we will eliminate the jump.  */
292   if (!any_condjump_p (BB_END (e->src)))
293     return NULL;
294 
295   if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
296     {
297       b->flags |= BB_NONTHREADABLE_BLOCK;
298       return NULL;
299     }
300 
301   set1 = pc_set (BB_END (e->src));
302   set2 = pc_set (BB_END (b));
303   if (((e->flags & EDGE_FALLTHRU) != 0)
304       != (XEXP (SET_SRC (set1), 1) == pc_rtx))
305     reverse1 = true;
306 
307   cond1 = XEXP (SET_SRC (set1), 0);
308   cond2 = XEXP (SET_SRC (set2), 0);
309   if (reverse1)
310     code1 = reversed_comparison_code (cond1, BB_END (e->src));
311   else
312     code1 = GET_CODE (cond1);
313 
314   code2 = GET_CODE (cond2);
315   reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
316 
317   if (!comparison_dominates_p (code1, code2)
318       && !comparison_dominates_p (code1, reversed_code2))
319     return NULL;
320 
321   /* Ensure that the comparison operators are equivalent.
322      ??? This is far too pessimistic.  We should allow swapped operands,
323      different CCmodes, or for example interval comparisons that
324      dominate even when the operands are not equivalent.  */
325   if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
326       || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
327     return NULL;
328 
329   /* Short circuit cases where block B contains some side effects, as we can't
330      safely bypass it.  */
331   for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
332        insn = NEXT_INSN (insn))
333     if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
334       {
335 	b->flags |= BB_NONTHREADABLE_BLOCK;
336 	return NULL;
337       }
338 
339   cselib_init (0);
340 
341   /* First process all values computed in the source basic block.  */
342   for (insn = NEXT_INSN (BB_HEAD (e->src));
343        insn != NEXT_INSN (BB_END (e->src));
344        insn = NEXT_INSN (insn))
345     if (INSN_P (insn))
346       cselib_process_insn (insn);
347 
348   nonequal = BITMAP_ALLOC (NULL);
349   CLEAR_REG_SET (nonequal);
350 
351   /* Now assume that we've continued via the edge E to B and continue
352      processing as if it were the same basic block.
353      Our goal is to prove that the whole block is a NOOP.  */
354 
355   for (insn = NEXT_INSN (BB_HEAD (b));
356        insn != NEXT_INSN (BB_END (b)) && !failed;
357        insn = NEXT_INSN (insn))
358     {
359       if (INSN_P (insn))
360 	{
361 	  rtx pat = PATTERN (insn);
362 
363 	  if (GET_CODE (pat) == PARALLEL)
364 	    {
365 	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
366 		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
367 	    }
368 	  else
369 	    failed |= mark_effect (pat, nonequal);
370 	}
371 
372       cselib_process_insn (insn);
373     }
374 
375   /* Later we should clear nonequal of dead registers.  So far we don't
376      have liveness information in cfg_cleanup.  */
377   if (failed)
378     {
379       b->flags |= BB_NONTHREADABLE_BLOCK;
380       goto failed_exit;
381     }
382 
383   /* cond2 must not mention any register whose value is not known to be
384      equal to its value in the former block.  */
385   if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
386     goto failed_exit;
387 
388   EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
389     goto failed_exit;
390 
391   BITMAP_FREE (nonequal);
392   cselib_finish ();
393   if ((comparison_dominates_p (code1, code2) != 0)
394       != (XEXP (SET_SRC (set2), 1) == pc_rtx))
395     return BRANCH_EDGE (b);
396   else
397     return FALLTHRU_EDGE (b);
398 
399 failed_exit:
400   BITMAP_FREE (nonequal);
401   cselib_finish ();
402   return NULL;
403 }
404 
405 /* Attempt to forward edges leaving basic block B.
406    Return true if successful.  */
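/* For example, if a successor X of B contains nothing but an unconditional
   jump to Y (a forwarder block), the edge B->X is redirected to point at Y
   directly, and the walk continues from Y in case it is a forwarder too.
   With CLEANUP_THREADING, thread_jump is additionally used to skip blocks
   whose conditional jump outcome is implied by the jump ending B.  */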
407 
408 static bool
409 try_forward_edges (int mode, basic_block b)
410 {
411   bool changed = false;
412   edge_iterator ei;
413   edge e, *threaded_edges = NULL;
414 
415   /* If we are partitioning hot/cold basic blocks, we don't want to
416      mess up unconditional or indirect jumps that cross between hot
417      and cold sections.
418 
419      Basic block partitioning may result in some jumps that appear to
420      be optimizable (or blocks that appear to be mergeable), but which really
421      must be left untouched (they are required to make it safely across
422      partition boundaries).  See the comments at the top of
423      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
424 
425   if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
426     return false;
427 
428   for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
429     {
430       basic_block target, first;
431       int counter, goto_locus;
432       bool threaded = false;
433       int nthreaded_edges = 0;
434       bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;
435 
436       /* Skip complex edges because we don't know how to update them.
437 
438 	 Still handle fallthru edges, as we may succeed in forwarding the
439 	 fallthru edge to the same place as the branch edge of a conditional
440 	 branch and turn the conditional branch into an unconditional branch.  */
441       if (e->flags & EDGE_COMPLEX)
442 	{
443 	  ei_next (&ei);
444 	  continue;
445 	}
446 
447       target = first = e->dest;
448       counter = NUM_FIXED_BLOCKS;
449       goto_locus = e->goto_locus;
450 
451       /* If we are partitioning hot/cold basic_blocks, we don't want to mess
452 	 up jumps that cross between hot/cold sections.
453 
454 	 Basic block partitioning may result in some jumps that appear
455 	 to be optimizable (or blocks that appear to be mergeable), but which
456 	 really must be left untouched (they are required to make it safely
457 	 across partition boundaries).  See the comments at the top of
458 	 bb-reorder.c:partition_hot_cold_basic_blocks for complete
459 	 details.  */
460 
461       if (first != EXIT_BLOCK_PTR
462 	  && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
463 	return false;
464 
465       while (counter < n_basic_blocks)
466 	{
467 	  basic_block new_target = NULL;
468 	  bool new_target_threaded = false;
469 	  may_thread |= (target->flags & BB_MODIFIED) != 0;
470 
471 	  if (FORWARDER_BLOCK_P (target)
472 	      && !(single_succ_edge (target)->flags & EDGE_CROSSING)
473 	      && single_succ (target) != EXIT_BLOCK_PTR)
474 	    {
475 	      /* Bypass trivial infinite loops.  */
476 	      new_target = single_succ (target);
477 	      if (target == new_target)
478 		counter = n_basic_blocks;
479 	      else if (!optimize)
480 		{
481 		  /* When not optimizing, ensure that edges or forwarder
482 		     blocks with different locus are not optimized out.  */
483 		  int new_locus = single_succ_edge (target)->goto_locus;
484 		  int locus = goto_locus;
485 
486 		  if (new_locus && locus && !locator_eq (new_locus, locus))
487 		    new_target = NULL;
488 		  else
489 		    {
490 		      rtx last;
491 
492 		      if (new_locus)
493 			locus = new_locus;
494 
495 		      last = BB_END (target);
496 		      if (DEBUG_INSN_P (last))
497 			last = prev_nondebug_insn (last);
498 
499 		      new_locus = last && INSN_P (last)
500 				  ? INSN_LOCATOR (last) : 0;
501 
502 		      if (new_locus && locus && !locator_eq (new_locus, locus))
503 			new_target = NULL;
504 		      else
505 			{
506 			  if (new_locus)
507 			    locus = new_locus;
508 
509 			  goto_locus = locus;
510 			}
511 		    }
512 		}
513 	    }
514 
515 	  /* Allow threading over only one edge at a time to simplify updating
516 	     of probabilities.  */
517 	  else if ((mode & CLEANUP_THREADING) && may_thread)
518 	    {
519 	      edge t = thread_jump (e, target);
520 	      if (t)
521 		{
522 		  if (!threaded_edges)
523 		    threaded_edges = XNEWVEC (edge, n_basic_blocks);
524 		  else
525 		    {
526 		      int i;
527 
528 		      /* Detect an infinite loop across blocks not
529 			 including the start block.  */
530 		      for (i = 0; i < nthreaded_edges; ++i)
531 			if (threaded_edges[i] == t)
532 			  break;
533 		      if (i < nthreaded_edges)
534 			{
535 			  counter = n_basic_blocks;
536 			  break;
537 			}
538 		    }
539 
540 		  /* Detect an infinite loop across the start block.  */
541 		  if (t->dest == b)
542 		    break;
543 
544 		  gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
545 		  threaded_edges[nthreaded_edges++] = t;
546 
547 		  new_target = t->dest;
548 		  new_target_threaded = true;
549 		}
550 	    }
551 
552 	  if (!new_target)
553 	    break;
554 
555 	  counter++;
556 	  target = new_target;
557 	  threaded |= new_target_threaded;
558 	}
559 
560       if (counter >= n_basic_blocks)
561 	{
562 	  if (dump_file)
563 	    fprintf (dump_file, "Infinite loop in BB %i.\n",
564 		     target->index);
565 	}
566       else if (target == first)
567 	; /* We didn't do anything.  */
568       else
569 	{
570 	  /* Save the values now, as the edge may get removed.  */
571 	  gcov_type edge_count = e->count;
572 	  int edge_probability = e->probability;
573 	  int edge_frequency;
574 	  int n = 0;
575 
576 	  e->goto_locus = goto_locus;
577 
578 	  /* Don't force if target is exit block.  */
579 	  if (threaded && target != EXIT_BLOCK_PTR)
580 	    {
581 	      notice_new_block (redirect_edge_and_branch_force (e, target));
582 	      if (dump_file)
583 		fprintf (dump_file, "Conditionals threaded.\n");
584 	    }
585 	  else if (!redirect_edge_and_branch (e, target))
586 	    {
587 	      if (dump_file)
588 		fprintf (dump_file,
589 			 "Forwarding edge %i->%i to %i failed.\n",
590 			 b->index, e->dest->index, target->index);
591 	      ei_next (&ei);
592 	      continue;
593 	    }
594 
595 	  /* We successfully forwarded the edge.  Now update profile
596 	     data: for each edge we traversed in the chain, remove
597 	     the original edge's execution count.  */
598 	  edge_frequency = ((edge_probability * b->frequency
599 			     + REG_BR_PROB_BASE / 2)
600 			    / REG_BR_PROB_BASE);
601 
602 	  do
603 	    {
604 	      edge t;
605 
606 	      if (!single_succ_p (first))
607 		{
608 		  gcc_assert (n < nthreaded_edges);
609 		  t = threaded_edges [n++];
610 		  gcc_assert (t->src == first);
611 		  update_bb_profile_for_threading (first, edge_frequency,
612 						   edge_count, t);
613 		  update_br_prob_note (first);
614 		}
615 	      else
616 		{
617 		  first->count -= edge_count;
618 		  if (first->count < 0)
619 		    first->count = 0;
620 		  first->frequency -= edge_frequency;
621 		  if (first->frequency < 0)
622 		    first->frequency = 0;
623 		  /* It is possible that as the result of
624 		     threading we've removed the edge, as it is
625 		     threaded to the fallthru edge.  Avoid
626 		     getting out of sync.  */
627 		  if (n < nthreaded_edges
628 		      && first == threaded_edges [n]->src)
629 		    n++;
630 		  t = single_succ_edge (first);
631 		}
632 
633 	      t->count -= edge_count;
634 	      if (t->count < 0)
635 		t->count = 0;
636 	      first = t->dest;
637 	    }
638 	  while (first != target);
639 
640 	  changed = true;
641 	  continue;
642 	}
643       ei_next (&ei);
644     }
645 
646   free (threaded_edges);
647   return changed;
648 }
649 
650 
651 /* Blocks A and B are to be merged into a single block.  A has no incoming
652    fallthru edge, so it can be moved before B without adding or modifying
653    any jumps (aside from the jump from A to B).  */
654 
655 static void
656 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
657 {
658   rtx barrier;
659 
660   /* If we are partitioning hot/cold basic blocks, we don't want to
661      mess up unconditional or indirect jumps that cross between hot
662      and cold sections.
663 
664      Basic block partitioning may result in some jumps that appear to
665      be optimizable (or blocks that appear to be mergeable), but which really
666      must be left untouched (they are required to make it safely across
667      partition boundaries).  See the comments at the top of
668      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
669 
670   if (BB_PARTITION (a) != BB_PARTITION (b))
671     return;
672 
673   barrier = next_nonnote_insn (BB_END (a));
674   gcc_assert (BARRIER_P (barrier));
675   delete_insn (barrier);
676 
677   /* Scramble the insn chain.  */
678   if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
679     reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
680   df_set_bb_dirty (a);
681 
682   if (dump_file)
683     fprintf (dump_file, "Moved block %d before %d and merged.\n",
684 	     a->index, b->index);
685 
686   /* Swap the records for the two blocks around.  */
687 
688   unlink_block (a);
689   link_block (a, b->prev_bb);
690 
691   /* Now blocks A and B are contiguous.  Merge them.  */
692   merge_blocks (a, b);
693 }
694 
695 /* Blocks A and B are to be merged into a single block.  B has no outgoing
696    fallthru edge, so it can be moved after A without adding or modifying
697    any jumps (aside from the jump from A to B).  */
698 
699 static void
700 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
701 {
702   rtx barrier, real_b_end;
703   rtx label, table;
704 
705   /* If we are partitioning hot/cold basic blocks, we don't want to
706      mess up unconditional or indirect jumps that cross between hot
707      and cold sections.
708 
709      Basic block partitioning may result in some jumps that appear to
710      be optimizable (or blocks that appear to be mergeable), but which really
711      must be left untouched (they are required to make it safely across
712      partition boundaries).  See the comments at the top of
713      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
714 
715   if (BB_PARTITION (a) != BB_PARTITION (b))
716     return;
717 
718   real_b_end = BB_END (b);
719 
720   /* If there is a jump table following block B, temporarily add the jump table
721      to block B so that it will also be moved to the correct location.  */
722   if (tablejump_p (BB_END (b), &label, &table)
723       && prev_active_insn (label) == BB_END (b))
724     {
725       BB_END (b) = table;
726     }
727 
728   /* There had better have been a barrier there.  Delete it.  */
729   barrier = NEXT_INSN (BB_END (b));
730   if (barrier && BARRIER_P (barrier))
731     delete_insn (barrier);
732 
733 
734   /* Scramble the insn chain.  */
735   reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
736 
737   /* Restore the real end of b.  */
738   BB_END (b) = real_b_end;
739 
740   if (dump_file)
741     fprintf (dump_file, "Moved block %d after %d and merged.\n",
742 	     b->index, a->index);
743 
744   /* Now blocks A and B are contiguous.  Merge them.  */
745   merge_blocks (a, b);
746 }
747 
748 /* Attempt to merge basic blocks that are potentially non-adjacent.
749    Return NULL iff the attempt failed, otherwise return the basic block
750    where cleanup_cfg should continue.  Because the merging commonly
751    moves a basic block away or introduces another optimization
752    possibility, return the basic block just before B so cleanup_cfg
753    doesn't need to iterate.
754 
755    It may be a good idea to return the basic block before C in the case
756    C has been moved after B and originally appeared earlier in the
757    insn sequence, but we have no information available about the
758    relative ordering of these two.  Hopefully it is not too common.  */
759 
760 static basic_block
761 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
762 {
763   basic_block next;
764 
765   /* If we are partitioning hot/cold basic blocks, we don't want to
766      mess up unconditional or indirect jumps that cross between hot
767      and cold sections.
768 
769      Basic block partitioning may result in some jumps that appear to
770      be optimizable (or blocks that appear to be mergeable), but which really
771      must be left untouched (they are required to make it safely across
772      partition boundaries).  See the comments at the top of
773      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
774 
775   if (BB_PARTITION (b) != BB_PARTITION (c))
776     return NULL;
777 
778   /* If B has a fallthru edge to C, no need to move anything.  */
779   if (e->flags & EDGE_FALLTHRU)
780     {
781       int b_index = b->index, c_index = c->index;
782       merge_blocks (b, c);
783       update_forwarder_flag (b);
784 
785       if (dump_file)
786 	fprintf (dump_file, "Merged %d and %d without moving.\n",
787 		 b_index, c_index);
788 
789       return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
790     }
791 
792   /* Otherwise we will need to move code around.  Do that only if expensive
793      transformations are allowed.  */
794   else if (mode & CLEANUP_EXPENSIVE)
795     {
796       edge tmp_edge, b_fallthru_edge;
797       bool c_has_outgoing_fallthru;
798       bool b_has_incoming_fallthru;
799 
800       /* Avoid overactive code motion, as the forwarder blocks should be
801 	 eliminated by edge redirection instead.  One exception might have
802 	 been if B is a forwarder block and C has no fallthru edge, but
803 	 that should be cleaned up by bb-reorder instead.  */
804       if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
805 	return NULL;
806 
807       /* We must make sure to not munge nesting of lexical blocks,
808 	 and loop notes.  This is done by squeezing out all the notes
809 	 and leaving them there to lie.  Not ideal, but functional.  */
810 
811       tmp_edge = find_fallthru_edge (c->succs);
812       c_has_outgoing_fallthru = (tmp_edge != NULL);
813 
814       tmp_edge = find_fallthru_edge (b->preds);
815       b_has_incoming_fallthru = (tmp_edge != NULL);
816       b_fallthru_edge = tmp_edge;
817       next = b->prev_bb;
818       if (next == c)
819 	next = next->prev_bb;
820 
821       /* Otherwise, we're going to try to move C after B.  If C does
822 	 not have an outgoing fallthru, then it can be moved
823 	 immediately after B without introducing or modifying jumps.  */
824       if (! c_has_outgoing_fallthru)
825 	{
826 	  merge_blocks_move_successor_nojumps (b, c);
827 	  return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
828 	}
829 
830       /* If B does not have an incoming fallthru, then it can be moved
831 	 immediately before C without introducing or modifying jumps.
832 	 C cannot be the first block, so we do not have to worry about
833 	 accessing a non-existent block.  */
834 
835       if (b_has_incoming_fallthru)
836 	{
837 	  basic_block bb;
838 
839 	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
840 	    return NULL;
841 	  bb = force_nonfallthru (b_fallthru_edge);
842 	  if (bb)
843 	    notice_new_block (bb);
844 	}
845 
846       merge_blocks_move_predecessor_nojumps (b, c);
847       return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
848     }
849 
850   return NULL;
851 }
852 
853 
854 /* Remove the memory attributes of the MEM expressions X and Y
855    where they are not equal.  */
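/* For example, if X and Y have different alias sets, both are reset to
   alias set 0; if their MEM_EXPRs differ, the expressions and offsets of
   both are cleared; sizes are unified to the larger of the two when both
   are known (and cleared otherwise), and alignments to the smaller.  */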
856 
857 void
858 merge_memattrs (rtx x, rtx y)
859 {
860   int i;
861   int j;
862   enum rtx_code code;
863   const char *fmt;
864 
865   if (x == y)
866     return;
867   if (x == 0 || y == 0)
868     return;
869 
870   code = GET_CODE (x);
871 
872   if (code != GET_CODE (y))
873     return;
874 
875   if (GET_MODE (x) != GET_MODE (y))
876     return;
877 
878   if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
879     {
880       if (! MEM_ATTRS (x))
881 	MEM_ATTRS (y) = 0;
882       else if (! MEM_ATTRS (y))
883 	MEM_ATTRS (x) = 0;
884       else
885 	{
886 	  HOST_WIDE_INT mem_size;
887 
888 	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
889 	    {
890 	      set_mem_alias_set (x, 0);
891 	      set_mem_alias_set (y, 0);
892 	    }
893 
894 	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
895 	    {
896 	      set_mem_expr (x, 0);
897 	      set_mem_expr (y, 0);
898 	      clear_mem_offset (x);
899 	      clear_mem_offset (y);
900 	    }
901 	  else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
902 		   || (MEM_OFFSET_KNOWN_P (x)
903 		       && MEM_OFFSET (x) != MEM_OFFSET (y)))
904 	    {
905 	      clear_mem_offset (x);
906 	      clear_mem_offset (y);
907 	    }
908 
909 	  if (MEM_SIZE_KNOWN_P (x) && MEM_SIZE_KNOWN_P (y))
910 	    {
911 	      mem_size = MAX (MEM_SIZE (x), MEM_SIZE (y));
912 	      set_mem_size (x, mem_size);
913 	      set_mem_size (y, mem_size);
914 	    }
915 	  else
916 	    {
917 	      clear_mem_size (x);
918 	      clear_mem_size (y);
919 	    }
920 
921 	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
922 	  set_mem_align (y, MEM_ALIGN (x));
923 	}
924     }
925 
926   fmt = GET_RTX_FORMAT (code);
927   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
928     {
929       switch (fmt[i])
930 	{
931 	case 'E':
932 	  /* Two vectors must have the same length.  */
933 	  if (XVECLEN (x, i) != XVECLEN (y, i))
934 	    return;
935 
936 	  for (j = 0; j < XVECLEN (x, i); j++)
937 	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
938 
939 	  break;
940 
941 	case 'e':
942 	  merge_memattrs (XEXP (x, i), XEXP (y, i));
943 	}
944     }
945   return;
946 }
947 
948 
949  /* Checks if patterns P1 and P2 are equivalent, apart from the possibly
950     different single sets S1 and S2.  */
951 
952 static bool
953 equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
954 {
955   int i;
956   rtx e1, e2;
957 
958   if (p1 == s1 && p2 == s2)
959     return true;
960 
961   if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
962     return false;
963 
964   if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
965     return false;
966 
967   for (i = 0; i < XVECLEN (p1, 0); i++)
968     {
969       e1 = XVECEXP (p1, 0, i);
970       e2 = XVECEXP (p2, 0, i);
971       if (e1 == s1 && e2 == s2)
972         continue;
973       if (reload_completed
974           ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
975         continue;
976 
977         return false;
978       return false;
979 
980   return true;
981 }
982 
983 /* Examine register notes on I1 and I2 and return:
984    - dir_forward if I1 can be replaced by I2, or
985    - dir_backward if I2 can be replaced by I1, or
986    - dir_both if both are the case.  */
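/* For example, with identical REG_EQUAL notes giving the value 7,

	i1: (set (reg A) (reg B))
	i2: (set (reg A) (const_int 7))

   i1 can be replaced by i2 (dir_forward), since a constant source is valid
   at any location, whereas the reverse would rely on (reg B) still holding
   7 at i1's location.  */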
987 
988 static enum replace_direction
989 can_replace_by (rtx i1, rtx i2)
990 {
991   rtx s1, s2, d1, d2, src1, src2, note1, note2;
992   bool c1, c2;
993 
994   /* Check for 2 sets.  */
995   s1 = single_set (i1);
996   s2 = single_set (i2);
997   if (s1 == NULL_RTX || s2 == NULL_RTX)
998     return dir_none;
999 
1000   /* Check that the 2 sets set the same dest.  */
1001   d1 = SET_DEST (s1);
1002   d2 = SET_DEST (s2);
1003   if (!(reload_completed
1004         ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
1005     return dir_none;
1006 
1007   /* Find identical reg_equiv or reg_equal notes, which imply that the 2 sets
1008      set dest to the same value.  */
1009   note1 = find_reg_equal_equiv_note (i1);
1010   note2 = find_reg_equal_equiv_note (i2);
1011   if (!note1 || !note2 || !rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0))
1012       || !CONST_INT_P (XEXP (note1, 0)))
1013     return dir_none;
1014 
1015   if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
1016     return dir_none;
1017 
1018   /* Although the 2 sets set dest to the same value, we cannot replace
1019        (set (dest) (const_int))
1020      by
1021        (set (dest) (reg))
1022      because we don't know if the reg is live and has the same value at the
1023      location of replacement.  */
1024   src1 = SET_SRC (s1);
1025   src2 = SET_SRC (s2);
1026   c1 = CONST_INT_P (src1);
1027   c2 = CONST_INT_P (src2);
1028   if (c1 && c2)
1029     return dir_both;
1030   else if (c2)
1031     return dir_forward;
1032   else if (c1)
1033     return dir_backward;
1034 
1035   return dir_none;
1036 }
1037 
1038 /* Merges directions A and B.  */
1039 
1040 static enum replace_direction
1041 merge_dir (enum replace_direction a, enum replace_direction b)
1042 {
1043   /* Implements the following table:
1044         |bo fw bw no
1045      ---+-----------
1046      bo |bo fw bw no
1047      fw |-- fw no no
1048      bw |-- -- bw no
1049      no |-- -- -- no.  */
1050 
1051   if (a == b)
1052     return a;
1053 
1054   if (a == dir_both)
1055     return b;
1056   if (b == dir_both)
1057     return a;
1058 
1059   return dir_none;
1060 }
1061 
1062 /* Examine I1 and I2 and return:
1063    - dir_forward if I1 can be replaced by I2, or
1064    - dir_backward if I2 can be replaced by I1, or
1065    - dir_both if both are the case.  */
1066 
1067 static enum replace_direction
1068 old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
1069 {
1070   rtx p1, p2;
1071 
1072   /* Verify that I1 and I2 are equivalent.  */
1073   if (GET_CODE (i1) != GET_CODE (i2))
1074     return dir_none;
1075 
1076   /* __builtin_unreachable() may lead to empty blocks (ending with
1077      NOTE_INSN_BASIC_BLOCK).  They may be crossjumped. */
1078   if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
1079     return dir_both;
1080 
1081   /* ??? Do not allow cross-jumping between different stack levels.  */
1082   p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
1083   p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
1084   if (p1 && p2)
1085     {
1086       p1 = XEXP (p1, 0);
1087       p2 = XEXP (p2, 0);
1088       if (!rtx_equal_p (p1, p2))
1089         return dir_none;
1090 
1091       /* ??? Worse, this adjustment had better be constant lest we
1092          have differing incoming stack levels.  */
1093       if (!frame_pointer_needed
1094           && find_args_size_adjust (i1) == HOST_WIDE_INT_MIN)
1095 	return dir_none;
1096     }
1097   else if (p1 || p2)
1098     return dir_none;
1099 
1100   p1 = PATTERN (i1);
1101   p2 = PATTERN (i2);
1102 
1103   if (GET_CODE (p1) != GET_CODE (p2))
1104     return dir_none;
1105 
1106   /* If this is a CALL_INSN, compare register usage information.
1107      If we don't check this on stack register machines, the two
1108      CALL_INSNs might be merged leaving reg-stack.c with mismatching
1109      numbers of stack registers in the same basic block.
1110      If we don't check this on machines with delay slots, a delay slot may
1111      be filled that clobbers a parameter expected by the subroutine.
1112 
1113      ??? We take the simple route for now and assume that if they're
1114      equal, they were constructed identically.
1115 
1116      Also check for identical exception regions.  */
1117 
1118   if (CALL_P (i1))
1119     {
1120       /* Ensure the same EH region.  */
1121       rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
1122       rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);
1123 
1124       if (!n1 && n2)
1125 	return dir_none;
1126 
1127       if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1128 	return dir_none;
1129 
1130       if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
1131 			CALL_INSN_FUNCTION_USAGE (i2))
1132 	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
1133 	return dir_none;
1134     }
1135 
1136 #ifdef STACK_REGS
1137   /* If cross_jump_death_matters is not 0, the insn's mode
1138      indicates whether or not the insn contains any stack-like
1139      regs.  */
1140 
1141   if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1142     {
1143       /* If register stack conversion has already been done, then
1144 	 death notes must also be compared before it is certain that
1145 	 the two instruction streams match.  */
1146 
1147       rtx note;
1148       HARD_REG_SET i1_regset, i2_regset;
1149 
1150       CLEAR_HARD_REG_SET (i1_regset);
1151       CLEAR_HARD_REG_SET (i2_regset);
1152 
1153       for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1154 	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1155 	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1156 
1157       for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1158 	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1159 	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1160 
1161       if (!hard_reg_set_equal_p (i1_regset, i2_regset))
1162 	return dir_none;
1163     }
1164 #endif
1165 
1166   if (reload_completed
1167       ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1168     return dir_both;
1169 
1170   return can_replace_by (i1, i2);
1171 }
1172 
1173 /* When comparing insns I1 and I2 in flow_find_cross_jump or
1174    flow_find_head_matching_sequence, ensure the notes match.  */
1175 
1176 static void
1177 merge_notes (rtx i1, rtx i2)
1178 {
1179   /* If the merged insns have different REG_EQUAL notes, then
1180      remove them.  */
1181   rtx equiv1 = find_reg_equal_equiv_note (i1);
1182   rtx equiv2 = find_reg_equal_equiv_note (i2);
1183 
1184   if (equiv1 && !equiv2)
1185     remove_note (i1, equiv1);
1186   else if (!equiv1 && equiv2)
1187     remove_note (i2, equiv2);
1188   else if (equiv1 && equiv2
1189 	   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1190     {
1191       remove_note (i1, equiv1);
1192       remove_note (i2, equiv2);
1193     }
1194 }
1195 
1196  /* Walks from I1 in BB1 backward till the next non-debug insn, and returns the
1197     resulting insn in I1, and the corresponding bb in BB1.  At the head of a
1198     bb, if there is a predecessor bb that reaches this bb via fallthru, and
1199     FOLLOW_FALLTHRU, walks further in the predecessor bb and registers this in
1200     DID_FALLTHRU.  Otherwise, stops at the head of the bb.  */
1201 
1202 static void
1203 walk_to_nondebug_insn (rtx *i1, basic_block *bb1, bool follow_fallthru,
1204                        bool *did_fallthru)
1205 {
1206   edge fallthru;
1207 
1208   *did_fallthru = false;
1209 
1210   /* Ignore notes.  */
1211   while (!NONDEBUG_INSN_P (*i1))
1212     {
1213       if (*i1 != BB_HEAD (*bb1))
1214         {
1215           *i1 = PREV_INSN (*i1);
1216           continue;
1217         }
1218 
1219       if (!follow_fallthru)
1220         return;
1221 
1222       fallthru = find_fallthru_edge ((*bb1)->preds);
1223       if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FUNCTION (cfun)
1224           || !single_succ_p (fallthru->src))
1225         return;
1226 
1227       *bb1 = fallthru->src;
1228       *i1 = BB_END (*bb1);
1229       *did_fallthru = true;
1230      }
1231 }
1232 
1233 /* Look through the insns at the end of BB1 and BB2 and find the longest
1234    sequence of insns that are either equivalent, or allow forward or backward
1235    replacement.  Store the first insns of that sequence in *F1 and *F2 and
1236    return the sequence length.
1237 
1238    DIR_P indicates the allowed replacement direction on function entry, and
1239    the actual replacement direction on function exit.  If NULL, only equivalent
1240    sequences are allowed.
1241 
1242    To simplify callers of this function, if the blocks match exactly,
1243    store the head of the blocks in *F1 and *F2.  */
1244 
1245 int
1246 flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx *f1, rtx *f2,
1247                       enum replace_direction *dir_p)
1248 {
1249   rtx i1, i2, last1, last2, afterlast1, afterlast2;
1250   int ninsns = 0;
1251   rtx p1;
1252   enum replace_direction dir, last_dir, afterlast_dir;
1253   bool follow_fallthru, did_fallthru;
1254 
1255   if (dir_p)
1256     dir = *dir_p;
1257   else
1258     dir = dir_both;
1259   afterlast_dir = dir;
1260   last_dir = afterlast_dir;
1261 
1262   /* Skip simple jumps at the end of the blocks.  Complex jumps still
1263      need to be compared for equivalence, which we'll do below.  */
1264 
1265   i1 = BB_END (bb1);
1266   last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1267   if (onlyjump_p (i1)
1268       || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1269     {
1270       last1 = i1;
1271       i1 = PREV_INSN (i1);
1272     }
1273 
1274   i2 = BB_END (bb2);
1275   if (onlyjump_p (i2)
1276       || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1277     {
1278       last2 = i2;
1279       /* Count everything except for unconditional jump as insn.  */
1280       if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1281 	ninsns++;
1282       i2 = PREV_INSN (i2);
1283     }
1284 
1285   while (true)
1286     {
1287       /* In the following example, we can replace all jumps to C by jumps to A.
1288 
1289          This removes 4 duplicate insns.
1290          [bb A] insn1            [bb C] insn1
1291                 insn2                   insn2
1292          [bb B] insn3                   insn3
1293                 insn4                   insn4
1294                 jump_insn               jump_insn
1295 
1296          We could also replace all jumps to A by jumps to C, but that leaves B
1297          alive, and removes only 2 duplicate insns.  In a subsequent crossjump
1298          step, all jumps to B would be replaced with jumps to the middle of C,
1299          achieving the same result with more effort.
1300          So we allow only the first possibility, which means that we don't allow
1301          fallthru in the block that's being replaced.  */
1302 
1303       follow_fallthru = dir_p && dir != dir_forward;
1304       walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
1305       if (did_fallthru)
1306         dir = dir_backward;
1307 
1308       follow_fallthru = dir_p && dir != dir_backward;
1309       walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
1310       if (did_fallthru)
1311         dir = dir_forward;
1312 
1313       if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1314 	break;
1315 
1316       dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
1317       if (dir == dir_none || (!dir_p && dir != dir_both))
1318 	break;
1319 
1320       merge_memattrs (i1, i2);
1321 
1322       /* Don't begin a cross-jump with a NOTE insn.  */
1323       if (INSN_P (i1))
1324 	{
1325 	  merge_notes (i1, i2);
1326 
1327 	  afterlast1 = last1, afterlast2 = last2;
1328 	  last1 = i1, last2 = i2;
1329 	  afterlast_dir = last_dir;
1330 	  last_dir = dir;
1331 	  p1 = PATTERN (i1);
1332 	  if (!(GET_CODE (p1) == USE || GET_CODE (p1) == CLOBBER))
1333 	    ninsns++;
1334 	}
1335 
1336       i1 = PREV_INSN (i1);
1337       i2 = PREV_INSN (i2);
1338     }
1339 
1340 #ifdef HAVE_cc0
1341   /* Don't allow the insn after a compare to be shared by
1342      cross-jumping unless the compare is also shared.  */
1343   if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1344     last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;
1345 #endif
1346 
1347   /* Include preceding notes and labels in the cross-jump.  One,
1348      this may bring us to the head of the blocks as requested above.
1349      Two, it keeps line number notes as matched as possible.  */
1350   if (ninsns)
1351     {
1352       bb1 = BLOCK_FOR_INSN (last1);
1353       while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
1354 	last1 = PREV_INSN (last1);
1355 
1356       if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1357 	last1 = PREV_INSN (last1);
1358 
1359       bb2 = BLOCK_FOR_INSN (last2);
1360       while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
1361 	last2 = PREV_INSN (last2);
1362 
1363       if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1364 	last2 = PREV_INSN (last2);
1365 
1366       *f1 = last1;
1367       *f2 = last2;
1368     }
1369 
1370   if (dir_p)
1371     *dir_p = last_dir;
1372   return ninsns;
1373 }
1374 
1375 /* Like flow_find_cross_jump, except start looking for a matching sequence from
1376    the head of the two blocks.  Do not include jumps at the end.
1377    If STOP_AFTER is nonzero, stop after finding that many matching
1378    instructions.  */
1379 
1380 int
1381 flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx *f1,
1382 				  rtx *f2, int stop_after)
1383 {
1384   rtx i1, i2, last1, last2, beforelast1, beforelast2;
1385   int ninsns = 0;
1386   edge e;
1387   edge_iterator ei;
1388   int nehedges1 = 0, nehedges2 = 0;
1389 
1390   FOR_EACH_EDGE (e, ei, bb1->succs)
1391     if (e->flags & EDGE_EH)
1392       nehedges1++;
1393   FOR_EACH_EDGE (e, ei, bb2->succs)
1394     if (e->flags & EDGE_EH)
1395       nehedges2++;
1396 
1397   i1 = BB_HEAD (bb1);
1398   i2 = BB_HEAD (bb2);
1399   last1 = beforelast1 = last2 = beforelast2 = NULL_RTX;
1400 
1401   while (true)
1402     {
1403       /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG.  */
1404       while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
1405 	{
1406 	  if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
1407 	    break;
1408 	  i1 = NEXT_INSN (i1);
1409 	}
1410 
1411       while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
1412 	{
1413 	  if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
1414 	    break;
1415 	  i2 = NEXT_INSN (i2);
1416 	}
1417 
1418       if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
1419 	  || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
1420 	break;
1421 
1422       if (NOTE_P (i1) || NOTE_P (i2)
1423 	  || JUMP_P (i1) || JUMP_P (i2))
1424 	break;
1425 
1426       /* A sanity check to make sure we're not merging insns with different
1427 	 effects on EH.  If only one of them ends a basic block, it shouldn't
1428 	 have an EH edge; if both end a basic block, there should be the same
1429 	 number of EH edges.  */
1430       if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
1431 	   && nehedges1 > 0)
1432 	  || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
1433 	      && nehedges2 > 0)
1434 	  || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
1435 	      && nehedges1 != nehedges2))
1436 	break;
1437 
1438       if (old_insns_match_p (0, i1, i2) != dir_both)
1439 	break;
1440 
1441       merge_memattrs (i1, i2);
1442 
1443       /* Don't begin a cross-jump with a NOTE insn.  */
1444       if (INSN_P (i1))
1445 	{
1446 	  merge_notes (i1, i2);
1447 
1448 	  beforelast1 = last1, beforelast2 = last2;
1449 	  last1 = i1, last2 = i2;
1450 	  ninsns++;
1451 	}
1452 
1453       if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
1454 	  || (stop_after > 0 && ninsns == stop_after))
1455 	break;
1456 
1457       i1 = NEXT_INSN (i1);
1458       i2 = NEXT_INSN (i2);
1459     }
1460 
1461 #ifdef HAVE_cc0
1462   /* Don't allow a compare to be shared by cross-jumping unless the insn
1463      after the compare is also shared.  */
1464   if (ninsns && reg_mentioned_p (cc0_rtx, last1) && sets_cc0_p (last1))
1465     last1 = beforelast1, last2 = beforelast2, ninsns--;
1466 #endif
1467 
1468   if (ninsns)
1469     {
1470       *f1 = last1;
1471       *f2 = last2;
1472     }
1473 
1474   return ninsns;
1475 }
1476 
1477 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1478    the branch instruction.  This means that if we commonize the control
1479    flow before the end of the basic block, the semantics remain unchanged.
1480 
1481    We may assume that there exists one edge with a common destination.  */
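/* For example, "if (a == 0) goto X; else goto Y;" ending BB1 and
   "if (a != 0) goto Y; else goto X;" ending BB2 match with the condition
   reversed, provided the (possibly forwarded) fallthru and branch
   destinations pair up and the branch probabilities are not wildly
   different.  */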
1482 
1483 static bool
1484 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1485 {
1486   int nehedges1 = 0, nehedges2 = 0;
1487   edge fallthru1 = 0, fallthru2 = 0;
1488   edge e1, e2;
1489   edge_iterator ei;
1490   rtx last1, last2;
1491   bool nonfakeedges;
1492 
1493   /* If we performed shrink-wrapping, edges to the EXIT_BLOCK_PTR can
1494      only be distinguished for JUMP_INSNs.  The two paths may differ in
1495      whether they went through the prologue.  Sibcalls are fine, we know
1496      that we either didn't need or inserted an epilogue before them.  */
1497   if (crtl->shrink_wrapped
1498       && single_succ_p (bb1) && single_succ (bb1) == EXIT_BLOCK_PTR
1499       && !JUMP_P (BB_END (bb1))
1500       && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
1501     return false;
1502 
1503   /* If BB1 has only one successor, we may be looking at either an
1504      unconditional jump, or a fake edge to exit.  */
1505   if (single_succ_p (bb1)
1506       && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1507       && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1508     return (single_succ_p (bb2)
1509 	    && (single_succ_edge (bb2)->flags
1510 		& (EDGE_COMPLEX | EDGE_FAKE)) == 0
1511 	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1512 
1513   /* Match conditional jumps - this may get tricky when fallthru and branch
1514      edges are crossed.  */
1515   if (EDGE_COUNT (bb1->succs) == 2
1516       && any_condjump_p (BB_END (bb1))
1517       && onlyjump_p (BB_END (bb1)))
1518     {
1519       edge b1, f1, b2, f2;
1520       bool reverse, match;
1521       rtx set1, set2, cond1, cond2;
1522       enum rtx_code code1, code2;
1523 
1524       if (EDGE_COUNT (bb2->succs) != 2
1525 	  || !any_condjump_p (BB_END (bb2))
1526 	  || !onlyjump_p (BB_END (bb2)))
1527 	return false;
1528 
1529       b1 = BRANCH_EDGE (bb1);
1530       b2 = BRANCH_EDGE (bb2);
1531       f1 = FALLTHRU_EDGE (bb1);
1532       f2 = FALLTHRU_EDGE (bb2);
1533 
1534       /* Get around possible forwarders on fallthru edges.  Other cases
1535 	 should be optimized out already.  */
1536       if (FORWARDER_BLOCK_P (f1->dest))
1537 	f1 = single_succ_edge (f1->dest);
1538 
1539       if (FORWARDER_BLOCK_P (f2->dest))
1540 	f2 = single_succ_edge (f2->dest);
1541 
1542       /* To simplify use of this function, return false if there are
1543 	 unneeded forwarder blocks.  These will get eliminated later
1544 	 during cleanup_cfg.  */
1545       if (FORWARDER_BLOCK_P (f1->dest)
1546 	  || FORWARDER_BLOCK_P (f2->dest)
1547 	  || FORWARDER_BLOCK_P (b1->dest)
1548 	  || FORWARDER_BLOCK_P (b2->dest))
1549 	return false;
1550 
1551       if (f1->dest == f2->dest && b1->dest == b2->dest)
1552 	reverse = false;
1553       else if (f1->dest == b2->dest && b1->dest == f2->dest)
1554 	reverse = true;
1555       else
1556 	return false;
1557 
1558       set1 = pc_set (BB_END (bb1));
1559       set2 = pc_set (BB_END (bb2));
1560       if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1561 	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1562 	reverse = !reverse;
1563 
1564       cond1 = XEXP (SET_SRC (set1), 0);
1565       cond2 = XEXP (SET_SRC (set2), 0);
1566       code1 = GET_CODE (cond1);
1567       if (reverse)
1568 	code2 = reversed_comparison_code (cond2, BB_END (bb2));
1569       else
1570 	code2 = GET_CODE (cond2);
1571 
1572       if (code2 == UNKNOWN)
1573 	return false;
1574 
1575       /* Verify codes and operands match.  */
1576       match = ((code1 == code2
1577 		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1578 		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1579 	       || (code1 == swap_condition (code2)
1580 		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
1581 					      XEXP (cond2, 0))
1582 		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
1583 					      XEXP (cond2, 1))));
1584 
1585       /* If we return true, we will join the blocks, which means that
1586 	 we will only have one branch prediction bit to work with.  Thus
1587 	 we require the existing branches to have probabilities that are
1588 	 roughly similar.  */
1589       if (match
1590 	  && optimize_bb_for_speed_p (bb1)
1591 	  && optimize_bb_for_speed_p (bb2))
1592 	{
1593 	  int prob2;
1594 
1595 	  if (b1->dest == b2->dest)
1596 	    prob2 = b2->probability;
1597 	  else
1598 	    /* Do not use f2 probability as f2 may be forwarded.  */
1599 	    prob2 = REG_BR_PROB_BASE - b2->probability;
1600 
1601 	  /* Fail if the difference in probabilities is greater than 50%.
1602 	     This rules out two well-predicted branches with opposite
1603 	     outcomes.  */
1604 	  if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1605 	    {
1606 	      if (dump_file)
1607 		fprintf (dump_file,
1608 			 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1609 			 bb1->index, bb2->index, b1->probability, prob2);
1610 
1611 	      return false;
1612 	    }
1613 	}
1614 
1615       if (dump_file && match)
1616 	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1617 		 bb1->index, bb2->index);
1618 
1619       return match;
1620     }
1621 
1622   /* Generic case - we are seeing a computed jump, table jump or trapping
1623      instruction.  */
1624 
1625   /* Check whether there are tablejumps at the end of BB1 and BB2.
1626      Return true if they are identical.  */
1627     {
1628       rtx label1, label2;
1629       rtx table1, table2;
1630 
1631       if (tablejump_p (BB_END (bb1), &label1, &table1)
1632 	  && tablejump_p (BB_END (bb2), &label2, &table2)
1633 	  && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1634 	{
1635 	  /* The labels should never be the same rtx.  If they really are the
1636 	     same, the jump tables are the same too.  So disable crossjumping of
1637 	     blocks BB1 and BB2 because when deleting the common insns at the end
1638 	     of BB1 by delete_basic_block () the jump table would be deleted too.  */
1639 	  /* If LABEL2 is referenced in BB1->END do not do anything
1640 	     because we would lose information when replacing
1641 	     LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
1642 	  if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1643 	    {
1644 	      /* Set IDENTICAL to true when the tables are identical.  */
1645 	      bool identical = false;
1646 	      rtx p1, p2;
1647 
1648 	      p1 = PATTERN (table1);
1649 	      p2 = PATTERN (table2);
1650 	      if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1651 		{
1652 		  identical = true;
1653 		}
1654 	      else if (GET_CODE (p1) == ADDR_DIFF_VEC
1655 		       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1656 		       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1657 		       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1658 		{
1659 		  int i;
1660 
1661 		  identical = true;
1662 		  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1663 		    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1664 		      identical = false;
1665 		}
1666 
1667 	      if (identical)
1668 		{
1669 		  replace_label_data rr;
1670 		  bool match;
1671 
1672 		  /* Temporarily replace references to LABEL1 with LABEL2
1673 		     in BB1->END so that we could compare the instructions.  */
1674 		  rr.r1 = label1;
1675 		  rr.r2 = label2;
1676 		  rr.update_label_nuses = false;
1677 		  for_each_rtx (&BB_END (bb1), replace_label, &rr);
1678 
1679 		  match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
1680 			   == dir_both);
1681 		  if (dump_file && match)
1682 		    fprintf (dump_file,
1683 			     "Tablejumps in bb %i and %i match.\n",
1684 			     bb1->index, bb2->index);
1685 
1686 		  /* Set the original label in BB1->END because when deleting
1687 		     a block whose end is a tablejump, the tablejump referenced
1688 		     from the instruction is deleted too.  */
1689 		  rr.r1 = label2;
1690 		  rr.r2 = label1;
1691 		  for_each_rtx (&BB_END (bb1), replace_label, &rr);
1692 
1693 		  return match;
1694 		}
1695 	    }
1696 	  return false;
1697 	}
1698     }
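
  /* Illustrative sketch (an assumption about the usual shape of such
     patterns, not from the original sources): the ADDR_DIFF_VEC patterns
     compared above look roughly like

       (addr_diff_vec:HI (label_ref L_base)
			 [(label_ref L1) (label_ref L2) ...]
			 (const_int min) (const_int max))

     i.e. operand 1 is the vector of case labels and operands 2 and 3 are
     the extra bound operands, which is why the code checks XVECLEN (p1, 1)
     and XEXP (p1, 2) / XEXP (p1, 3) before comparing the entries one by
     one.  */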
1699 
1700   last1 = BB_END (bb1);
1701   last2 = BB_END (bb2);
1702   if (DEBUG_INSN_P (last1))
1703     last1 = prev_nondebug_insn (last1);
1704   if (DEBUG_INSN_P (last2))
1705     last2 = prev_nondebug_insn (last2);
1706   /* First ensure that the instructions match.  There may be many outgoing
1707      edges so this test is generally cheaper.  */
1708   if (old_insns_match_p (mode, last1, last2) != dir_both)
1709     return false;
1710 
1711   /* Search the outgoing edges, ensure that the counts do match, find possible
1712      fallthru and exception handling edges since these need more
1713      validation.  */
1714   if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1715     return false;
1716 
1717   nonfakeedges = false;
1718   FOR_EACH_EDGE (e1, ei, bb1->succs)
1719     {
1720       e2 = EDGE_SUCC (bb2, ei.index);
1721 
1722       if ((e1->flags & EDGE_FAKE) == 0)
1723 	nonfakeedges = true;
1724 
1725       if (e1->flags & EDGE_EH)
1726 	nehedges1++;
1727 
1728       if (e2->flags & EDGE_EH)
1729 	nehedges2++;
1730 
1731       if (e1->flags & EDGE_FALLTHRU)
1732 	fallthru1 = e1;
1733       if (e2->flags & EDGE_FALLTHRU)
1734 	fallthru2 = e2;
1735     }
1736 
1737   /* If number of edges of various types does not match, fail.  */
1738   if (nehedges1 != nehedges2
1739       || (fallthru1 != 0) != (fallthru2 != 0))
1740     return false;
1741 
1742   /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors,
1743      and the last real insn doesn't have a REG_ARGS_SIZE note, don't
1744      attempt to optimize, as the two basic blocks might have different
1745      REG_ARGS_SIZE depths.  For noreturn calls and unconditional
1746      traps there should be REG_ARGS_SIZE notes, though they could be
1747      missing for __builtin_unreachable () uses.  */
1748   if (!nonfakeedges
1749       && !ACCUMULATE_OUTGOING_ARGS
1750       && (!INSN_P (last1)
1751           || !find_reg_note (last1, REG_ARGS_SIZE, NULL)))
1752     return false;
1753 
1754   /* Fallthru edges must be forwarded to the same destination.  */
1755   if (fallthru1)
1756     {
1757       basic_block d1 = (forwarder_block_p (fallthru1->dest)
1758 			? single_succ (fallthru1->dest): fallthru1->dest);
1759       basic_block d2 = (forwarder_block_p (fallthru2->dest)
1760 			? single_succ (fallthru2->dest): fallthru2->dest);
1761 
1762       if (d1 != d2)
1763 	return false;
1764     }
1765 
1766   /* Ensure the same EH region.  */
1767   {
1768     rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1769     rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1770 
1771     if (!n1 && n2)
1772       return false;
1773 
1774     if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1775       return false;
1776   }
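
  /* Illustrative note (not from the original sources): this keeps, for
     instance, two otherwise identical call insns apart when they belong to
     different exception regions, since merging them would send one of the
     calls to the wrong landing pad.  */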
1777 
1778   /* The same checks as in try_crossjump_to_edge.  They are required for the
1779      RTL version of sequence abstraction.  */
1780   FOR_EACH_EDGE (e1, ei, bb2->succs)
1781     {
1782       edge e2;
1783       edge_iterator ei;
1784       basic_block d1 = e1->dest;
1785 
1786       if (FORWARDER_BLOCK_P (d1))
1787         d1 = EDGE_SUCC (d1, 0)->dest;
1788 
1789       FOR_EACH_EDGE (e2, ei, bb1->succs)
1790         {
1791           basic_block d2 = e2->dest;
1792           if (FORWARDER_BLOCK_P (d2))
1793             d2 = EDGE_SUCC (d2, 0)->dest;
1794           if (d1 == d2)
1795             break;
1796         }
1797 
1798       if (!e2)
1799         return false;
1800     }
1801 
1802   return true;
1803 }
1804 
1805 /* Returns true if basic block BB has a preserve label.  */
1806 
1807 static bool
1808 block_has_preserve_label (basic_block bb)
1809 {
1810   return (bb
1811           && block_label (bb)
1812           && LABEL_PRESERVE_P (block_label (bb)));
1813 }
1814 
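/* Illustrative sketch (not from the original sources): crossjumping
   typically fires on source like

     if (cond)
       {
	 x = f (1);
	 return x + 1;
       }
     else
       {
	 x = f (2);
	 return x + 1;
       }

   where both arms end with the same insns for "return x + 1;".  The common
   tail is kept in one block and the other predecessor is redirected into
   it, which is what try_crossjump_to_edge below does.  */
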
1815 /* E1 and E2 are edges with the same destination block.  Search their
1816    predecessors for common code.  If found, redirect control flow from
1817    (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
1818    or the other way around (dir_backward).  DIR specifies the allowed
1819    replacement direction.  */
1820 
1821 static bool
1822 try_crossjump_to_edge (int mode, edge e1, edge e2,
1823                        enum replace_direction dir)
1824 {
1825   int nmatch;
1826   basic_block src1 = e1->src, src2 = e2->src;
1827   basic_block redirect_to, redirect_from, to_remove;
1828   basic_block osrc1, osrc2, redirect_edges_to, tmp;
1829   rtx newpos1, newpos2;
1830   edge s;
1831   edge_iterator ei;
1832 
1833   newpos1 = newpos2 = NULL_RTX;
1834 
1835   /* If we have partitioned hot/cold basic blocks, it is a bad idea
1836      to try this optimization.
1837 
1838      Basic block partitioning may result in some jumps that appear to
1839      be optimizable (or blocks that appear to be mergeable), but which really
1840      must be left untouched (they are required to make it safely across
1841      partition boundaries).  See the comments at the top of
1842      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
1843 
1844   if (flag_reorder_blocks_and_partition && reload_completed)
1845     return false;
1846 
1847   /* Search backward through forwarder blocks.  We don't need to worry
1848      about multiple entry or chained forwarders, as they will be optimized
1849      away.  We do this to look past the unconditional jump following a
1850      conditional jump that is required due to the current CFG shape.  */
1851   if (single_pred_p (src1)
1852       && FORWARDER_BLOCK_P (src1))
1853     e1 = single_pred_edge (src1), src1 = e1->src;
1854 
1855   if (single_pred_p (src2)
1856       && FORWARDER_BLOCK_P (src2))
1857     e2 = single_pred_edge (src2), src2 = e2->src;
1858 
1859   /* Nothing to do if we reach ENTRY, or a common source block.  */
1860   if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1861     return false;
1862   if (src1 == src2)
1863     return false;
1864 
1865   /* Seeing more than one forwarder block would confuse us later...  */
1866   if (FORWARDER_BLOCK_P (e1->dest)
1867       && FORWARDER_BLOCK_P (single_succ (e1->dest)))
1868     return false;
1869 
1870   if (FORWARDER_BLOCK_P (e2->dest)
1871       && FORWARDER_BLOCK_P (single_succ (e2->dest)))
1872     return false;
1873 
1874   /* Likewise with dead code (possibly newly created by the other optimizations
1875      of cleanup_cfg).  */
1876   if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1877     return false;
1878 
1879   /* Look for the common insn sequence, part the first ...  */
1880   if (!outgoing_edges_match (mode, src1, src2))
1881     return false;
1882 
1883   /* ... and part the second.  */
1884   nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);
1885 
1886   osrc1 = src1;
1887   osrc2 = src2;
1888   if (newpos1 != NULL_RTX)
1889     src1 = BLOCK_FOR_INSN (newpos1);
1890   if (newpos2 != NULL_RTX)
1891     src2 = BLOCK_FOR_INSN (newpos2);
1892 
1893   if (dir == dir_backward)
1894     {
1895 #define SWAP(T, X, Y) do { T tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
1896       SWAP (basic_block, osrc1, osrc2);
1897       SWAP (basic_block, src1, src2);
1898       SWAP (edge, e1, e2);
1899       SWAP (rtx, newpos1, newpos2);
1900 #undef SWAP
1901     }
1902 
1903   /* Don't proceed with the crossjump unless we found a sufficient number
1904      of matching instructions or the 'from' block was totally matched
1905      (such that its predecessors will hopefully be redirected and the
1906      block removed).  */
1907   if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
1908       && (newpos1 != BB_HEAD (src1)))
1909     return false;
1910 
1911   /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
1912   if (block_has_preserve_label (e1->dest)
1913       && (e1->flags & EDGE_ABNORMAL))
1914     return false;
1915 
1916   /* Here we know that the insns at the end of SRC1 which are common with SRC2
1917      will be deleted.
1918      If we have tablejumps at the end of SRC1 and SRC2,
1919      they have already been compared for equivalence in outgoing_edges_match (),
1920      so replace the references to TABLE1 by references to TABLE2.  */
1921     {
1922       rtx label1, label2;
1923       rtx table1, table2;
1924 
1925       if (tablejump_p (BB_END (osrc1), &label1, &table1)
1926 	  && tablejump_p (BB_END (osrc2), &label2, &table2)
1927 	  && label1 != label2)
1928 	{
1929 	  replace_label_data rr;
1930 	  rtx insn;
1931 
1932 	  /* Replace references to LABEL1 with LABEL2.  */
1933 	  rr.r1 = label1;
1934 	  rr.r2 = label2;
1935 	  rr.update_label_nuses = true;
1936 	  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1937 	    {
1938 	      /* Do not replace the label in SRC1->END because when deleting
1939 		 a block whose end is a tablejump, the tablejump referenced
1940 		 from the instruction is deleted too.  */
1941 	      if (insn != BB_END (osrc1))
1942 		for_each_rtx (&insn, replace_label, &rr);
1943 	    }
1944 	}
1945     }
1946 
1947   /* Avoid splitting if possible.  We must always split when SRC2 has
1948      EH predecessor edges, or we may end up with basic blocks with both
1949      normal and EH predecessor edges.  */
1950   if (newpos2 == BB_HEAD (src2)
1951       && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
1952     redirect_to = src2;
1953   else
1954     {
1955       if (newpos2 == BB_HEAD (src2))
1956 	{
1957 	  /* Skip possible basic block header.  */
1958 	  if (LABEL_P (newpos2))
1959 	    newpos2 = NEXT_INSN (newpos2);
1960 	  while (DEBUG_INSN_P (newpos2))
1961 	    newpos2 = NEXT_INSN (newpos2);
1962 	  if (NOTE_P (newpos2))
1963 	    newpos2 = NEXT_INSN (newpos2);
1964 	  while (DEBUG_INSN_P (newpos2))
1965 	    newpos2 = NEXT_INSN (newpos2);
1966 	}
1967 
1968       if (dump_file)
1969 	fprintf (dump_file, "Splitting bb %i before %i insns\n",
1970 		 src2->index, nmatch);
1971       redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1972     }
1973 
1974   if (dump_file)
1975     fprintf (dump_file,
1976 	     "Cross jumping from bb %i to bb %i; %i common insns\n",
1977 	     src1->index, src2->index, nmatch);
1978 
1979   /* We may have some registers visible through the block.  */
1980   df_set_bb_dirty (redirect_to);
1981 
1982   if (osrc2 == src2)
1983     redirect_edges_to = redirect_to;
1984   else
1985     redirect_edges_to = osrc2;
1986 
1987   /* Recompute the frequencies and counts of outgoing edges.  */
1988   FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
1989     {
1990       edge s2;
1991       edge_iterator ei;
1992       basic_block d = s->dest;
1993 
1994       if (FORWARDER_BLOCK_P (d))
1995 	d = single_succ (d);
1996 
1997       FOR_EACH_EDGE (s2, ei, src1->succs)
1998 	{
1999 	  basic_block d2 = s2->dest;
2000 	  if (FORWARDER_BLOCK_P (d2))
2001 	    d2 = single_succ (d2);
2002 	  if (d == d2)
2003 	    break;
2004 	}
2005 
2006       s->count += s2->count;
2007 
2008       /* Take care to update possible forwarder blocks.  We verified
2009 	 that there is no more than one in the chain, so we can't run
2010 	 into an infinite loop.  */
2011       if (FORWARDER_BLOCK_P (s->dest))
2012 	{
2013 	  single_succ_edge (s->dest)->count += s2->count;
2014 	  s->dest->count += s2->count;
2015 	  s->dest->frequency += EDGE_FREQUENCY (s);
2016 	}
2017 
2018       if (FORWARDER_BLOCK_P (s2->dest))
2019 	{
2020 	  single_succ_edge (s2->dest)->count -= s2->count;
2021 	  if (single_succ_edge (s2->dest)->count < 0)
2022 	    single_succ_edge (s2->dest)->count = 0;
2023 	  s2->dest->count -= s2->count;
2024 	  s2->dest->frequency -= EDGE_FREQUENCY (s);
2025 	  if (s2->dest->frequency < 0)
2026 	    s2->dest->frequency = 0;
2027 	  if (s2->dest->count < 0)
2028 	    s2->dest->count = 0;
2029 	}
2030 
2031       if (!redirect_edges_to->frequency && !src1->frequency)
2032 	s->probability = (s->probability + s2->probability) / 2;
2033       else
2034 	s->probability
2035 	  = ((s->probability * redirect_edges_to->frequency +
2036 	      s2->probability * src1->frequency)
2037 	     / (redirect_edges_to->frequency + src1->frequency));
2038     }
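
  /* Illustrative note (not from the original sources): the weighted average
     above merges the two profiles.  For example, with
     redirect_edges_to->frequency == 300, src1->frequency == 100,
     s->probability == 8000 and s2->probability == 4000, the merged
     probability becomes (8000 * 300 + 4000 * 100) / 400 == 7000.  */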
2039 
2040   /* Adjust count and frequency for the block.  An earlier jump
2041      threading pass may have left the profile in an inconsistent
2042      state (see update_bb_profile_for_threading) so we must be
2043      prepared for overflows.  */
2044   tmp = redirect_to;
2045   do
2046     {
2047       tmp->count += src1->count;
2048       tmp->frequency += src1->frequency;
2049       if (tmp->frequency > BB_FREQ_MAX)
2050         tmp->frequency = BB_FREQ_MAX;
2051       if (tmp == redirect_edges_to)
2052         break;
2053       tmp = find_fallthru_edge (tmp->succs)->dest;
2054     }
2055   while (true);
2056   update_br_prob_note (redirect_edges_to);
2057 
2058   /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */
2059 
2060   /* Skip possible basic block header.  */
2061   if (LABEL_P (newpos1))
2062     newpos1 = NEXT_INSN (newpos1);
2063 
2064   while (DEBUG_INSN_P (newpos1))
2065     newpos1 = NEXT_INSN (newpos1);
2066 
2067   if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
2068     newpos1 = NEXT_INSN (newpos1);
2069 
2070   while (DEBUG_INSN_P (newpos1))
2071     newpos1 = NEXT_INSN (newpos1);
2072 
2073   redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
2074   to_remove = single_succ (redirect_from);
2075 
2076   redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
2077   delete_basic_block (to_remove);
2078 
2079   update_forwarder_flag (redirect_from);
2080   if (redirect_to != src2)
2081     update_forwarder_flag (src2);
2082 
2083   return true;
2084 }
2085 
2086 /* Search the predecessors of BB for common insn sequences.  When found,
2087    share code between them by redirecting control flow.  Return true if
2088    any changes were made.  */
2089 
2090 static bool
2091 try_crossjump_bb (int mode, basic_block bb)
2092 {
2093   edge e, e2, fallthru;
2094   bool changed;
2095   unsigned max, ix, ix2;
2096 
2097   /* Nothing to do if there are not at least two incoming edges.  */
2098   if (EDGE_COUNT (bb->preds) < 2)
2099     return false;
2100 
2101   /* Don't crossjump if this block ends in a computed jump,
2102      unless we are optimizing for size.  */
2103   if (optimize_bb_for_size_p (bb)
2104       && bb != EXIT_BLOCK_PTR
2105       && computed_jump_p (BB_END (bb)))
2106     return false;
2107 
2108   /* If we are partitioning hot/cold basic blocks, we don't want to
2109      mess up unconditional or indirect jumps that cross between hot
2110      and cold sections.
2111 
2112      Basic block partitioning may result in some jumps that appear to
2113      be optimizable (or blocks that appear to be mergeable), but which really
2114      must be left untouched (they are required to make it safely across
2115      partition boundaries).  See the comments at the top of
2116      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
2117 
2118   if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
2119 					BB_PARTITION (EDGE_PRED (bb, 1)->src)
2120       || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
2121     return false;
2122 
2123   /* It is always cheapest to redirect a block that ends in a branch to
2124      a block that falls through into BB, as that adds no branches to the
2125      program.  We'll try that combination first.  */
2126   fallthru = NULL;
2127   max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
2128 
2129   if (EDGE_COUNT (bb->preds) > max)
2130     return false;
2131 
2132   fallthru = find_fallthru_edge (bb->preds);
2133 
2134   changed = false;
2135   for (ix = 0; ix < EDGE_COUNT (bb->preds);)
2136     {
2137       e = EDGE_PRED (bb, ix);
2138       ix++;
2139 
2140       /* As noted above, first try with the fallthru predecessor (or a
2141 	 fallthru predecessor if we are in cfglayout mode).  */
2142       if (fallthru)
2143 	{
2144 	  /* Don't combine the fallthru edge into anything else.
2145 	     If there is a match, we'll do it the other way around.  */
2146 	  if (e == fallthru)
2147 	    continue;
2148 	  /* If nothing changed since the last attempt, there is nothing
2149 	     we can do.  */
2150 	  if (!first_pass
2151 	      && !((e->src->flags & BB_MODIFIED)
2152 		   || (fallthru->src->flags & BB_MODIFIED)))
2153 	    continue;
2154 
2155 	  if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))
2156 	    {
2157 	      changed = true;
2158 	      ix = 0;
2159 	      continue;
2160 	    }
2161 	}
2162 
2163       /* Non-obvious work limiting check: Recognize that we're going
2164 	 to call try_crossjump_bb on every basic block.  So if we have
2165 	 two blocks with lots of outgoing edges (a switch) and they
2166 	 share lots of common destinations, then we would do the
2167 	 cross-jump check once for each common destination.
2168 
2169 	 Now, if the blocks actually are cross-jump candidates, then
2170 	 all of their destinations will be shared.  Which means that
2171 	 we only need to check them for cross-jump candidacy once.  We
2172 	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
2173 	 choosing to do the check from the block for which the edge
2174 	 in question is the first successor of A.  */
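      /* Illustrative note (not from the original sources): if blocks A and
	 B both end in a switch and share successors D1, D2 and D3, then
	 try_crossjump_bb runs for each of D1, D2 and D3 and would examine
	 the pair (A, B) three times.  The filter below and the index check
	 further down ensure the pair is examined only from the destination
	 reached by A's first successor edge, and only when A has the lower
	 block index.  */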
2175       if (EDGE_SUCC (e->src, 0) != e)
2176 	continue;
2177 
2178       for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
2179 	{
2180 	  e2 = EDGE_PRED (bb, ix2);
2181 
2182 	  if (e2 == e)
2183 	    continue;
2184 
2185 	  /* We've already checked the fallthru edge above.  */
2186 	  if (e2 == fallthru)
2187 	    continue;
2188 
2189 	  /* The "first successor" check above only prevents multiple
2190 	     checks of crossjump(A,B).  In order to prevent redundant
2191 	     checks of crossjump(B,A), require that A be the block
2192 	     with the lowest index.  */
2193 	  if (e->src->index > e2->src->index)
2194 	    continue;
2195 
2196 	  /* If nothing changed since the last attempt, there is nothing
2197 	     we can do.  */
2198 	  if (!first_pass
2199 	      && !((e->src->flags & BB_MODIFIED)
2200 		   || (e2->src->flags & BB_MODIFIED)))
2201 	    continue;
2202 
2203 	  /* Neither e nor e2 is a fallthru edge, so we can crossjump in either
2204 	     direction.  */
2205 	  if (try_crossjump_to_edge (mode, e, e2, dir_both))
2206 	    {
2207 	      changed = true;
2208 	      ix = 0;
2209 	      break;
2210 	    }
2211 	}
2212     }
2213 
2214   if (changed)
2215     crossjumps_occured = true;
2216 
2217   return changed;
2218 }
2219 
2220 /* Search the successors of BB for common insn sequences.  When found,
2221    share code between them by moving it across the basic block
2222    boundary.  Return true if any changes were made.  */
2223 
2224 static bool
2225 try_head_merge_bb (basic_block bb)
2226 {
2227   basic_block final_dest_bb = NULL;
2228   int max_match = INT_MAX;
2229   edge e0;
2230   rtx *headptr, *currptr, *nextptr;
2231   bool changed, moveall;
2232   unsigned ix;
2233   rtx e0_last_head, cond, move_before;
2234   unsigned nedges = EDGE_COUNT (bb->succs);
2235   rtx jump = BB_END (bb);
2236   regset live, live_union;
2237 
2238   /* Nothing to do if there are not at least two outgoing edges.  */
2239   if (nedges < 2)
2240     return false;
2241 
2242   /* Don't crossjump if this block ends in a computed jump,
2243      unless we are optimizing for size.  */
2244   if (optimize_bb_for_size_p (bb)
2245       && bb != EXIT_BLOCK_PTR
2246       && computed_jump_p (BB_END (bb)))
2247     return false;
2248 
2249   cond = get_condition (jump, &move_before, true, false);
2250   if (cond == NULL_RTX)
2251     {
2252 #ifdef HAVE_cc0
2253       if (reg_mentioned_p (cc0_rtx, jump))
2254 	move_before = prev_nonnote_nondebug_insn (jump);
2255       else
2256 #endif
2257 	move_before = jump;
2258     }
2259 
2260   for (ix = 0; ix < nedges; ix++)
2261     if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR)
2262       return false;
2263 
2264   for (ix = 0; ix < nedges; ix++)
2265     {
2266       edge e = EDGE_SUCC (bb, ix);
2267       basic_block other_bb = e->dest;
2268 
2269       if (df_get_bb_dirty (other_bb))
2270 	{
2271 	  block_was_dirty = true;
2272 	  return false;
2273 	}
2274 
2275       if (e->flags & EDGE_ABNORMAL)
2276 	return false;
2277 
2278       /* Normally, all destination blocks must only be reachable from this
2279 	 block, i.e. they must have one incoming edge.
2280 
2281 	 There is one special case we can handle, that of multiple consecutive
2282 	 jumps where the first jumps to one of the targets of the second jump.
2283 	 This happens frequently in switch statements for default labels.
2284 	 The structure is as follows:
2285 	 FINAL_DEST_BB
2286 	 ....
2287 	 if (cond) jump A;
2288 	 fall through
2289 	 BB
2290 	 jump with targets A, B, C, D...
2291 	 A
2292 	 has two incoming edges, from FINAL_DEST_BB and BB
2293 
2294 	 In this case, we can try to move the insns through BB and into
2295 	 FINAL_DEST_BB.  */
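      /* Illustrative note (an assumption about typical input, not from the
	 original sources): this shape commonly comes from the bounds check
	 emitted in front of a tablejump, e.g.

	   if (x > 4) goto default_label;   (this jump ends FINAL_DEST_BB)
	   switch (x) { ... }               (BB: the tablejump to A, B, C, ...)

	 where default_label is both the target of the bounds check and one of
	 the entries of the jump table, so that block has two incoming edges.  */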
2296       if (EDGE_COUNT (other_bb->preds) != 1)
2297 	{
2298 	  edge incoming_edge, incoming_bb_other_edge;
2299 	  edge_iterator ei;
2300 
2301 	  if (final_dest_bb != NULL
2302 	      || EDGE_COUNT (other_bb->preds) != 2)
2303 	    return false;
2304 
2305 	  /* We must be able to move the insns across the whole block.  */
2306 	  move_before = BB_HEAD (bb);
2307 	  while (!NONDEBUG_INSN_P (move_before))
2308 	    move_before = NEXT_INSN (move_before);
2309 
2310 	  if (EDGE_COUNT (bb->preds) != 1)
2311 	    return false;
2312 	  incoming_edge = EDGE_PRED (bb, 0);
2313 	  final_dest_bb = incoming_edge->src;
2314 	  if (EDGE_COUNT (final_dest_bb->succs) != 2)
2315 	    return false;
2316 	  FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
2317 	    if (incoming_bb_other_edge != incoming_edge)
2318 	      break;
2319 	  if (incoming_bb_other_edge->dest != other_bb)
2320 	    return false;
2321 	}
2322     }
2323 
2324   e0 = EDGE_SUCC (bb, 0);
2325   e0_last_head = NULL_RTX;
2326   changed = false;
2327 
2328   for (ix = 1; ix < nedges; ix++)
2329     {
2330       edge e = EDGE_SUCC (bb, ix);
2331       rtx e0_last, e_last;
2332       int nmatch;
2333 
2334       nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
2335 						 &e0_last, &e_last, 0);
2336       if (nmatch == 0)
2337 	return false;
2338 
2339       if (nmatch < max_match)
2340 	{
2341 	  max_match = nmatch;
2342 	  e0_last_head = e0_last;
2343 	}
2344     }
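
  /* Illustrative note (not from the original sources): if BB has e.g. four
     successors and the heads of the last three match the first successor's
     head for 5, 3 and 7 insns respectively, the loop above leaves
     max_match == 3, i.e. only the prefix common to every successor is a
     candidate for merging.  */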
2345 
2346   /* If we matched an entire block, we probably have to avoid moving the
2347      last insn.  */
2348   if (max_match > 0
2349       && e0_last_head == BB_END (e0->dest)
2350       && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
2351 	  || control_flow_insn_p (e0_last_head)))
2352     {
2353       max_match--;
2354       if (max_match == 0)
2355 	return false;
2356       do
2357 	e0_last_head = prev_real_insn (e0_last_head);
2358       while (DEBUG_INSN_P (e0_last_head));
2359     }
2360 
2361   if (max_match == 0)
2362     return false;
2363 
2364   /* We must find a union of the live registers at each of the end points.  */
2365   live = BITMAP_ALLOC (NULL);
2366   live_union = BITMAP_ALLOC (NULL);
2367 
2368   currptr = XNEWVEC (rtx, nedges);
2369   headptr = XNEWVEC (rtx, nedges);
2370   nextptr = XNEWVEC (rtx, nedges);
2371 
2372   for (ix = 0; ix < nedges; ix++)
2373     {
2374       int j;
2375       basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
2376       rtx head = BB_HEAD (merge_bb);
2377 
2378       while (!NONDEBUG_INSN_P (head))
2379 	head = NEXT_INSN (head);
2380       headptr[ix] = head;
2381       currptr[ix] = head;
2382 
2383       /* Compute the end point and live information.  */
2384       for (j = 1; j < max_match; j++)
2385 	do
2386 	  head = NEXT_INSN (head);
2387 	while (!NONDEBUG_INSN_P (head));
2388       simulate_backwards_to_point (merge_bb, live, head);
2389       IOR_REG_SET (live_union, live);
2390     }
2391 
2392   /* If we're moving across two blocks, verify the validity of the
2393      first move, then adjust the target and let the loop below deal
2394      with the final move.  */
2395   if (final_dest_bb != NULL)
2396     {
2397       rtx move_upto;
2398 
2399       moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
2400 				       jump, e0->dest, live_union,
2401 				       NULL, &move_upto);
2402       if (!moveall)
2403 	{
2404 	  if (move_upto == NULL_RTX)
2405 	    goto out;
2406 
2407 	  while (e0_last_head != move_upto)
2408 	    {
2409 	      df_simulate_one_insn_backwards (e0->dest, e0_last_head,
2410 					      live_union);
2411 	      e0_last_head = PREV_INSN (e0_last_head);
2412 	    }
2413 	}
2414       if (e0_last_head == NULL_RTX)
2415 	goto out;
2416 
2417       jump = BB_END (final_dest_bb);
2418       cond = get_condition (jump, &move_before, true, false);
2419       if (cond == NULL_RTX)
2420 	{
2421 #ifdef HAVE_cc0
2422 	  if (reg_mentioned_p (cc0_rtx, jump))
2423 	    move_before = prev_nonnote_nondebug_insn (jump);
2424 	  else
2425 #endif
2426 	    move_before = jump;
2427 	}
2428     }
2429 
2430   do
2431     {
2432       rtx move_upto;
2433       moveall = can_move_insns_across (currptr[0], e0_last_head,
2434 				       move_before, jump, e0->dest, live_union,
2435 				       NULL, &move_upto);
2436       if (!moveall && move_upto == NULL_RTX)
2437 	{
2438 	  if (jump == move_before)
2439 	    break;
2440 
2441 	  /* Try again, using a different insertion point.  */
2442 	  move_before = jump;
2443 
2444 #ifdef HAVE_cc0
2445 	  /* Don't try moving before a cc0 user, as that may invalidate
2446 	     the cc0.  */
2447 	  if (reg_mentioned_p (cc0_rtx, jump))
2448 	    break;
2449 #endif
2450 
2451 	  continue;
2452 	}
2453 
2454       if (final_dest_bb && !moveall)
2455 	/* We haven't checked whether a partial move would be OK for the first
2456 	   move, so we have to fail this case.  */
2457 	break;
2458 
2459       changed = true;
2460       for (;;)
2461 	{
2462 	  if (currptr[0] == move_upto)
2463 	    break;
2464 	  for (ix = 0; ix < nedges; ix++)
2465 	    {
2466 	      rtx curr = currptr[ix];
2467 	      do
2468 		curr = NEXT_INSN (curr);
2469 	      while (!NONDEBUG_INSN_P (curr));
2470 	      currptr[ix] = curr;
2471 	    }
2472 	}
2473 
2474       /* If we can't currently move all of the identical insns, remember
2475 	 each insn after the range that we'll merge.  */
2476       if (!moveall)
2477 	for (ix = 0; ix < nedges; ix++)
2478 	  {
2479 	    rtx curr = currptr[ix];
2480 	    do
2481 	      curr = NEXT_INSN (curr);
2482 	    while (!NONDEBUG_INSN_P (curr));
2483 	    nextptr[ix] = curr;
2484 	  }
2485 
2486       reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
2487       df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
2488       if (final_dest_bb != NULL)
2489 	df_set_bb_dirty (final_dest_bb);
2490       df_set_bb_dirty (bb);
2491       for (ix = 1; ix < nedges; ix++)
2492 	{
2493 	  df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
2494 	  delete_insn_chain (headptr[ix], currptr[ix], false);
2495 	}
2496       if (!moveall)
2497 	{
2498 	  if (jump == move_before)
2499 	    break;
2500 
2501 	  /* For the unmerged insns, try a different insertion point.  */
2502 	  move_before = jump;
2503 
2504 #ifdef HAVE_cc0
2505 	  /* Don't try moving before a cc0 user, as that may invalidate
2506 	     the cc0.  */
2507 	  if (reg_mentioned_p (cc0_rtx, jump))
2508 	    break;
2509 #endif
2510 
2511 	  for (ix = 0; ix < nedges; ix++)
2512 	    currptr[ix] = headptr[ix] = nextptr[ix];
2513 	}
2514     }
2515   while (!moveall);
2516 
2517  out:
2518   free (currptr);
2519   free (headptr);
2520   free (nextptr);
2521 
2522   crossjumps_occured |= changed;
2523 
2524   return changed;
2525 }
2526 
2527 /* Return true if BB contains just a bb note, or a bb note followed
2528    only by DEBUG_INSNs.  */
2529 
2530 static bool
2531 trivially_empty_bb_p (basic_block bb)
2532 {
2533   rtx insn = BB_END (bb);
2534 
2535   while (1)
2536     {
2537       if (insn == BB_HEAD (bb))
2538 	return true;
2539       if (!DEBUG_INSN_P (insn))
2540 	return false;
2541       insn = PREV_INSN (insn);
2542     }
2543 }
2544 
2545 /* Do simple CFG optimizations - basic block merging, simplification of
2546    jump instructions, etc.  Return nonzero if changes were made.  */
2547 
2548 static bool
2549 try_optimize_cfg (int mode)
2550 {
2551   bool changed_overall = false;
2552   bool changed;
2553   int iterations = 0;
2554   basic_block bb, b, next;
2555 
2556   if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
2557     clear_bb_flags ();
2558 
2559   crossjumps_occured = false;
2560 
2561   FOR_EACH_BB (bb)
2562     update_forwarder_flag (bb);
2563 
2564   if (! targetm.cannot_modify_jumps_p ())
2565     {
2566       first_pass = true;
2567       /* Attempt to merge blocks as made possible by edge removal.  If
2568 	 a block has only one successor, and the successor has only
2569 	 one predecessor, they may be combined.  */
2570       do
2571 	{
2572 	  block_was_dirty = false;
2573 	  changed = false;
2574 	  iterations++;
2575 
2576 	  if (dump_file)
2577 	    fprintf (dump_file,
2578 		     "\n\ntry_optimize_cfg iteration %i\n\n",
2579 		     iterations);
2580 
2581 	  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
2582 	    {
2583 	      basic_block c;
2584 	      edge s;
2585 	      bool changed_here = false;
2586 
2587 	      /* Delete trivially dead basic blocks.  These are either
2588 		 blocks with no predecessors, or empty blocks with no
2589 		 successors.  However, if the empty block with no
2590 		 successors is the successor of the ENTRY_BLOCK, it is
2591 		 kept.  This ensures that the ENTRY_BLOCK will have a
2592 		 successor which is a precondition for many RTL
2593 		 passes.  Empty blocks may result from expanding
2594 		 __builtin_unreachable ().  */
2595 	      if (EDGE_COUNT (b->preds) == 0
2596 		  || (EDGE_COUNT (b->succs) == 0
2597 		      && trivially_empty_bb_p (b)
2598 		      && single_succ_edge (ENTRY_BLOCK_PTR)->dest != b))
2599 		{
2600 		  c = b->prev_bb;
2601 		  if (EDGE_COUNT (b->preds) > 0)
2602 		    {
2603 		      edge e;
2604 		      edge_iterator ei;
2605 
2606 		      if (current_ir_type () == IR_RTL_CFGLAYOUT)
2607 			{
2608 			  if (b->il.rtl->footer
2609 			      && BARRIER_P (b->il.rtl->footer))
2610 			    FOR_EACH_EDGE (e, ei, b->preds)
2611 			      if ((e->flags & EDGE_FALLTHRU)
2612 				  && e->src->il.rtl->footer == NULL)
2613 				{
2614 				  if (b->il.rtl->footer)
2615 				    {
2616 				      e->src->il.rtl->footer = b->il.rtl->footer;
2617 				      b->il.rtl->footer = NULL;
2618 				    }
2619 				  else
2620 				    {
2621 				      start_sequence ();
2622 				      e->src->il.rtl->footer = emit_barrier ();
2623 				      end_sequence ();
2624 				    }
2625 				}
2626 			}
2627 		      else
2628 			{
2629 			  rtx last = get_last_bb_insn (b);
2630 			  if (last && BARRIER_P (last))
2631 			    FOR_EACH_EDGE (e, ei, b->preds)
2632 			      if ((e->flags & EDGE_FALLTHRU))
2633 				emit_barrier_after (BB_END (e->src));
2634 			}
2635 		    }
2636 		  delete_basic_block (b);
2637 		  changed = true;
2638 		  /* Avoid trying to remove ENTRY_BLOCK_PTR.  */
2639 		  b = (c == ENTRY_BLOCK_PTR ? c->next_bb : c);
2640 		  continue;
2641 		}
2642 
2643 	      /* Remove code labels no longer used.  */
2644 	      if (single_pred_p (b)
2645 		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2646 		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
2647 		  && LABEL_P (BB_HEAD (b))
2648 		  /* If the previous block ends with a branch to this
2649 		     block, we can't delete the label.  Normally this
2650 		     is a condjump that is yet to be simplified, but
2651 		     if CASE_DROPS_THRU, this can be a tablejump with
2652 		     some element going to the same place as the
2653 		     default (fallthru).  */
2654 		  && (single_pred (b) == ENTRY_BLOCK_PTR
2655 		      || !JUMP_P (BB_END (single_pred (b)))
2656 		      || ! label_is_jump_target_p (BB_HEAD (b),
2657 						   BB_END (single_pred (b)))))
2658 		{
2659 		  rtx label = BB_HEAD (b);
2660 
2661 		  delete_insn_chain (label, label, false);
2662 		  /* If the case label is undeletable, move it after the
2663 		     BASIC_BLOCK note.  */
2664 		  if (NOTE_KIND (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
2665 		    {
2666 		      rtx bb_note = NEXT_INSN (BB_HEAD (b));
2667 
2668 		      reorder_insns_nobb (label, label, bb_note);
2669 		      BB_HEAD (b) = bb_note;
2670 		      if (BB_END (b) == bb_note)
2671 			BB_END (b) = label;
2672 		    }
2673 		  if (dump_file)
2674 		    fprintf (dump_file, "Deleted label in block %i.\n",
2675 			     b->index);
2676 		}
2677 
2678 	      /* If we fall through an empty block, we can remove it.  */
2679 	      if (!(mode & CLEANUP_CFGLAYOUT)
2680 		  && single_pred_p (b)
2681 		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2682 		  && !LABEL_P (BB_HEAD (b))
2683 		  && FORWARDER_BLOCK_P (b)
2684 		  /* Note that forwarder_block_p true ensures that
2685 		     there is a successor for this block.  */
2686 		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
2687 		  && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
2688 		{
2689 		  if (dump_file)
2690 		    fprintf (dump_file,
2691 			     "Deleting fallthru block %i.\n",
2692 			     b->index);
2693 
2694 		  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
2695 		  redirect_edge_succ_nodup (single_pred_edge (b),
2696 					    single_succ (b));
2697 		  delete_basic_block (b);
2698 		  changed = true;
2699 		  b = c;
2700 		  continue;
2701 		}
2702 
2703 	      /* Merge B with its single successor, if any.  */
2704 	      if (single_succ_p (b)
2705 		  && (s = single_succ_edge (b))
2706 		  && !(s->flags & EDGE_COMPLEX)
2707 		  && (c = s->dest) != EXIT_BLOCK_PTR
2708 		  && single_pred_p (c)
2709 		  && b != c)
2710 		{
2711 		  /* When not in cfg_layout mode, use code aware of reordering
2712 		     INSNs.  This code possibly creates new basic blocks, so it
2713 		     does not fit the merge_blocks interface and is kept here in
2714 		     the hope that it will become useless once more of the
2715 		     compiler is transformed to use cfg_layout mode.  */
2716 
2717 		  if ((mode & CLEANUP_CFGLAYOUT)
2718 		      && can_merge_blocks_p (b, c))
2719 		    {
2720 		      merge_blocks (b, c);
2721 		      update_forwarder_flag (b);
2722 		      changed_here = true;
2723 		    }
2724 		  else if (!(mode & CLEANUP_CFGLAYOUT)
2725 			   /* If the jump insn has side effects,
2726 			      we can't kill the edge.  */
2727 			   && (!JUMP_P (BB_END (b))
2728 			       || (reload_completed
2729 				   ? simplejump_p (BB_END (b))
2730 				   : (onlyjump_p (BB_END (b))
2731 				      && !tablejump_p (BB_END (b),
2732 						       NULL, NULL))))
2733 			   && (next = merge_blocks_move (s, b, c, mode)))
2734 		      {
2735 			b = next;
2736 			changed_here = true;
2737 		      }
2738 		}
2739 
2740 	      /* Simplify branch over branch.  */
2741 	      if ((mode & CLEANUP_EXPENSIVE)
2742 		   && !(mode & CLEANUP_CFGLAYOUT)
2743 		   && try_simplify_condjump (b))
2744 		changed_here = true;
2745 
2746 	      /* If B has a single outgoing edge, but uses a
2747 		 non-trivial jump instruction without side-effects, we
2748 		 can either delete the jump entirely, or replace it
2749 		 with a simple unconditional jump.  */
2750 	      if (single_succ_p (b)
2751 		  && single_succ (b) != EXIT_BLOCK_PTR
2752 		  && onlyjump_p (BB_END (b))
2753 		  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
2754 		  && try_redirect_by_replacing_jump (single_succ_edge (b),
2755 						     single_succ (b),
2756 						     (mode & CLEANUP_CFGLAYOUT) != 0))
2757 		{
2758 		  update_forwarder_flag (b);
2759 		  changed_here = true;
2760 		}
2761 
2762 	      /* Simplify branch to branch.  */
2763 	      if (try_forward_edges (mode, b))
2764 		{
2765 		  update_forwarder_flag (b);
2766 		  changed_here = true;
2767 		}
2768 
2769 	      /* Look for shared code between blocks.  */
2770 	      if ((mode & CLEANUP_CROSSJUMP)
2771 		  && try_crossjump_bb (mode, b))
2772 		changed_here = true;
2773 
2774 	      if ((mode & CLEANUP_CROSSJUMP)
2775 		  /* This can lengthen register lifetimes.  Do it only after
2776 		     reload.  */
2777 		  && reload_completed
2778 		  && try_head_merge_bb (b))
2779 		changed_here = true;
2780 
2781 	      /* Don't get confused by the index shift caused by
2782 		 deleting blocks.  */
2783 	      if (!changed_here)
2784 		b = b->next_bb;
2785 	      else
2786 		changed = true;
2787 	    }
2788 
2789 	  if ((mode & CLEANUP_CROSSJUMP)
2790 	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
2791 	    changed = true;
2792 
2793 	  if (block_was_dirty)
2794 	    {
2795 	      /* This should only be set by head-merging.  */
2796 	      gcc_assert (mode & CLEANUP_CROSSJUMP);
2797 	      df_analyze ();
2798 	    }
2799 
2800 #ifdef ENABLE_CHECKING
2801 	  if (changed)
2802 	    verify_flow_info ();
2803 #endif
2804 
2805 	  changed_overall |= changed;
2806 	  first_pass = false;
2807 	}
2808       while (changed);
2809     }
2810 
2811   FOR_ALL_BB (b)
2812     b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
2813 
2814   return changed_overall;
2815 }
2816 
2817 /* Delete all unreachable basic blocks.  */
2818 
2819 bool
2820 delete_unreachable_blocks (void)
2821 {
2822   bool changed = false;
2823   basic_block b, prev_bb;
2824 
2825   find_unreachable_blocks ();
2826 
2827   /* When we're in GIMPLE mode and there may be debug insns, we should
2828      delete blocks in reverse dominator order, so as to get a chance
2829      to substitute all released DEFs into debug stmts.  If we don't
2830      have dominators information, walking blocks backward gets us a
2831      better chance of retaining most debug information than
2832      otherwise.  */
2833   if (MAY_HAVE_DEBUG_STMTS && current_ir_type () == IR_GIMPLE
2834       && dom_info_available_p (CDI_DOMINATORS))
2835     {
2836       for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
2837 	{
2838 	  prev_bb = b->prev_bb;
2839 
2840 	  if (!(b->flags & BB_REACHABLE))
2841 	    {
2842 	      /* Speed up the removal of blocks that don't dominate
2843 		 others.  Walking backwards, this should be the common
2844 		 case.  */
2845 	      if (!first_dom_son (CDI_DOMINATORS, b))
2846 		delete_basic_block (b);
2847 	      else
2848 		{
2849 		  VEC (basic_block, heap) *h
2850 		    = get_all_dominated_blocks (CDI_DOMINATORS, b);
2851 
2852 		  while (VEC_length (basic_block, h))
2853 		    {
2854 		      b = VEC_pop (basic_block, h);
2855 
2856 		      prev_bb = b->prev_bb;
2857 
2858 		      gcc_assert (!(b->flags & BB_REACHABLE));
2859 
2860 		      delete_basic_block (b);
2861 		    }
2862 
2863 		  VEC_free (basic_block, heap, h);
2864 		}
2865 
2866 	      changed = true;
2867 	    }
2868 	}
2869     }
2870   else
2871     {
2872       for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
2873 	{
2874 	  prev_bb = b->prev_bb;
2875 
2876 	  if (!(b->flags & BB_REACHABLE))
2877 	    {
2878 	      delete_basic_block (b);
2879 	      changed = true;
2880 	    }
2881 	}
2882     }
2883 
2884   if (changed)
2885     tidy_fallthru_edges ();
2886   return changed;
2887 }
2888 
2889 /* Delete any jump tables never referenced.  We can't delete them at the
2890    time of removing the tablejump insn, as they are referenced by the
2891    preceding insns computing the destination, so we delay deleting them
2892    and garbage collect them once life information is computed.  */
2893 void
2894 delete_dead_jumptables (void)
2895 {
2896   basic_block bb;
2897 
2898   /* A dead jump table does not belong to any basic block.  Scan insns
2899      between two adjacent basic blocks.  */
2900   FOR_EACH_BB (bb)
2901     {
2902       rtx insn, next;
2903 
2904       for (insn = NEXT_INSN (BB_END (bb));
2905 	   insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
2906 	   insn = next)
2907 	{
2908 	  next = NEXT_INSN (insn);
2909 	  if (LABEL_P (insn)
2910 	      && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
2911 	      && JUMP_TABLE_DATA_P (next))
2912 	    {
2913 	      rtx label = insn, jump = next;
2914 
2915 	      if (dump_file)
2916 		fprintf (dump_file, "Dead jumptable %i removed\n",
2917 			 INSN_UID (insn));
2918 
2919 	      next = NEXT_INSN (next);
2920 	      delete_insn (jump);
2921 	      delete_insn (label);
2922 	    }
2923 	}
2924     }
2925 }
2926 
2927 
2928 /* Tidy the CFG by deleting unreachable code and whatnot.  */
2929 
2930 bool
2931 cleanup_cfg (int mode)
2932 {
2933   bool changed = false;
2934 
2935   /* Set the cfglayout mode flag here.  We could update all the callers
2936      but that is just inconvenient, especially given that we eventually
2937      want to have cfglayout mode as the default.  */
2938   if (current_ir_type () == IR_RTL_CFGLAYOUT)
2939     mode |= CLEANUP_CFGLAYOUT;
2940 
2941   timevar_push (TV_CLEANUP_CFG);
2942   if (delete_unreachable_blocks ())
2943     {
2944       changed = true;
2945       /* We've possibly created trivially dead code.  Clean it up right
2946 	 now to introduce more opportunities for try_optimize_cfg.  */
2947       if (!(mode & (CLEANUP_NO_INSN_DEL))
2948 	  && !reload_completed)
2949 	delete_trivially_dead_insns (get_insns (), max_reg_num ());
2950     }
2951 
2952   compact_blocks ();
2953 
2954   /* To tail-merge blocks ending in the same noreturn function (e.g.
2955      a call to abort) we have to insert fake edges to exit.  Do this
2956      here once.  The fake edges do not interfere with any other CFG
2957      cleanups.  */
2958   if (mode & CLEANUP_CROSSJUMP)
2959     add_noreturn_fake_exit_edges ();
2960 
2961   if (!dbg_cnt (cfg_cleanup))
2962     return changed;
2963 
2964   while (try_optimize_cfg (mode))
2965     {
2966       delete_unreachable_blocks (), changed = true;
2967       if (!(mode & CLEANUP_NO_INSN_DEL))
2968 	{
2969 	  /* Try to remove some trivially dead insns when doing an expensive
2970 	     cleanup.  But delete_trivially_dead_insns doesn't work after
2971 	     reload (it only handles pseudos) and run_fast_dce is too costly
2972 	     to run in every iteration.
2973 
2974 	     For effective cross jumping, we really want to run a fast DCE to
2975 	     clean up any dead conditions, or they get in the way of performing
2976 	     useful tail merges.
2977 
2978 	     Other transformations in cleanup_cfg are not so sensitive to dead
2979 	     code, so delete_trivially_dead_insns or even doing nothing at all
2980 	     is good enough.  */
2981 	  if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
2982 	      && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
2983 	    break;
2984 	  if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occured)
2985 	    run_fast_dce ();
2986 	}
2987       else
2988 	break;
2989     }
2990 
2991   if (mode & CLEANUP_CROSSJUMP)
2992     remove_fake_exit_edges ();
2993 
2994   /* Don't call delete_dead_jumptables in cfglayout mode, because
2995      that function assumes that jump tables are in the insns stream.
2996      But we also don't _have_ to delete dead jumptables in cfglayout
2997      mode because we shouldn't even be looking at things that are
2998      not in a basic block.  Dead jumptables are cleaned up when
2999      going out of cfglayout mode.  */
3000   if (!(mode & CLEANUP_CFGLAYOUT))
3001     delete_dead_jumptables ();
3002 
3003   timevar_pop (TV_CLEANUP_CFG);
3004 
3005   return changed;
3006 }
3007 
3008 static unsigned int
3009 rest_of_handle_jump (void)
3010 {
3011   if (crtl->tail_call_emit)
3012     fixup_tail_calls ();
3013   return 0;
3014 }
3015 
3016 struct rtl_opt_pass pass_jump =
3017 {
3018  {
3019   RTL_PASS,
3020   "sibling",                            /* name */
3021   NULL,                                 /* gate */
3022   rest_of_handle_jump,			/* execute */
3023   NULL,                                 /* sub */
3024   NULL,                                 /* next */
3025   0,                                    /* static_pass_number */
3026   TV_JUMP,                              /* tv_id */
3027   0,                                    /* properties_required */
3028   0,                                    /* properties_provided */
3029   0,                                    /* properties_destroyed */
3030   TODO_ggc_collect,                     /* todo_flags_start */
3031   TODO_verify_flow,                     /* todo_flags_finish */
3032  }
3033 };
3034 
3035 
3036 static unsigned int
3037 rest_of_handle_jump2 (void)
3038 {
3039   delete_trivially_dead_insns (get_insns (), max_reg_num ());
3040   if (dump_file)
3041     dump_flow_info (dump_file, dump_flags);
3042   cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
3043 	       | (flag_thread_jumps ? CLEANUP_THREADING : 0));
3044   return 0;
3045 }
3046 
3047 
3048 struct rtl_opt_pass pass_jump2 =
3049 {
3050  {
3051   RTL_PASS,
3052   "jump",                               /* name */
3053   NULL,                                 /* gate */
3054   rest_of_handle_jump2,			/* execute */
3055   NULL,                                 /* sub */
3056   NULL,                                 /* next */
3057   0,                                    /* static_pass_number */
3058   TV_JUMP,                              /* tv_id */
3059   0,                                    /* properties_required */
3060   0,                                    /* properties_provided */
3061   0,                                    /* properties_destroyed */
3062   TODO_ggc_collect,                     /* todo_flags_start */
3063   TODO_verify_rtl_sharing,              /* todo_flags_finish */
3064  }
3065 };
3066