xref: /dragonfly/contrib/gcc-8.0/gcc/cfgcleanup.c (revision c87dd536)
1 /* Control flow optimization code for GNU compiler.
2    Copyright (C) 1987-2018 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 /* This file contains the control flow optimizer.  The main entry point is
21    cleanup_cfg.  The following optimizations are performed:
22 
23    - Unreachable block removal
24    - Edge forwarding (an edge to a forwarder block is forwarded to its
25      successor.  Simplification of the branch instruction is performed by
26      the underlying infrastructure, so the branch can be converted to a
27      simplejump or eliminated).
28    - Cross jumping (tail merging)
29    - Conditional jump-around-simplejump simplification
30    - Basic block merging.  */
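/* Most of these are implemented by the functions declared below: edge
   forwarding and jump threading in try_forward_edges and thread_jump,
   cross jumping in try_crossjump_bb and try_crossjump_to_edge, the
   jump-around-jump simplification in try_simplify_condjump, and block
   merging in merge_blocks_move and its helpers, all driven from
   try_optimize_cfg.  */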
31 
32 #include "config.h"
33 #include "system.h"
34 #include "coretypes.h"
35 #include "backend.h"
36 #include "target.h"
37 #include "rtl.h"
38 #include "tree.h"
39 #include "cfghooks.h"
40 #include "df.h"
41 #include "memmodel.h"
42 #include "tm_p.h"
43 #include "insn-config.h"
44 #include "emit-rtl.h"
45 #include "cselib.h"
46 #include "params.h"
47 #include "tree-pass.h"
48 #include "cfgloop.h"
49 #include "cfgrtl.h"
50 #include "cfganal.h"
51 #include "cfgbuild.h"
52 #include "cfgcleanup.h"
53 #include "dce.h"
54 #include "dbgcnt.h"
55 #include "rtl-iter.h"
56 
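/* A forwarder block is a block that does nothing but transfer control to
   its single successor: apart from its label and notes, it contains at most
   an unconditional jump.  See forwarder_block_p.  */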
57 #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
58 
59 /* Set to true when we are running the first pass of the try_optimize_cfg loop.  */
60 static bool first_pass;
61 
62 /* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
63 static bool crossjumps_occurred;
64 
65 /* Set to true if we couldn't run an optimization due to stale liveness
66    information; we should run df_analyze to enable more opportunities.  */
67 static bool block_was_dirty;
68 
69 static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
70 static bool try_crossjump_bb (int, basic_block);
71 static bool outgoing_edges_match (int, basic_block, basic_block);
72 static enum replace_direction old_insns_match_p (int, rtx_insn *, rtx_insn *);
73 
74 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
75 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
76 static bool try_optimize_cfg (int);
77 static bool try_simplify_condjump (basic_block);
78 static bool try_forward_edges (int, basic_block);
79 static edge thread_jump (edge, basic_block);
80 static bool mark_effect (rtx, bitmap);
81 static void notice_new_block (basic_block);
82 static void update_forwarder_flag (basic_block);
83 static void merge_memattrs (rtx, rtx);
84 
85 /* Set flags for newly created block.  */
86 
87 static void
88 notice_new_block (basic_block bb)
89 {
90   if (!bb)
91     return;
92 
93   if (forwarder_block_p (bb))
94     bb->flags |= BB_FORWARDER_BLOCK;
95 }
96 
97 /* Recompute forwarder flag after block has been modified.  */
98 
99 static void
100 update_forwarder_flag (basic_block bb)
101 {
102   if (forwarder_block_p (bb))
103     bb->flags |= BB_FORWARDER_BLOCK;
104   else
105     bb->flags &= ~BB_FORWARDER_BLOCK;
106 }
107 
108 /* Simplify a conditional jump around an unconditional jump.
109    Return true if something changed.  */
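/* Schematically, this turns

	if (cond) goto L1;
	goto L2;
     L1: ...

   into

	if (! cond) goto L2;
     L1: ...

   deleting the block that held the unconditional jump.  */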
110 
111 static bool
112 try_simplify_condjump (basic_block cbranch_block)
113 {
114   basic_block jump_block, jump_dest_block, cbranch_dest_block;
115   edge cbranch_jump_edge, cbranch_fallthru_edge;
116   rtx_insn *cbranch_insn;
117 
118   /* Verify that there are exactly two successors.  */
119   if (EDGE_COUNT (cbranch_block->succs) != 2)
120     return false;
121 
122   /* Verify that we've got a normal conditional branch at the end
123      of the block.  */
124   cbranch_insn = BB_END (cbranch_block);
125   if (!any_condjump_p (cbranch_insn))
126     return false;
127 
128   cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
129   cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
130 
131   /* The next block must not have multiple predecessors, must not
132      be the last block in the function, and must contain just the
133      unconditional jump.  */
134   jump_block = cbranch_fallthru_edge->dest;
135   if (!single_pred_p (jump_block)
136       || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
137       || !FORWARDER_BLOCK_P (jump_block))
138     return false;
139   jump_dest_block = single_succ (jump_block);
140 
141   /* If we are partitioning hot/cold basic blocks, we don't want to
142      mess up unconditional or indirect jumps that cross between hot
143      and cold sections.
144 
145      Basic block partitioning may result in some jumps that appear to
146      be optimizable (or blocks that appear to be mergeable), but which really
147      must be left untouched (they are required to make it safely across
148      partition boundaries).  See the comments at the top of
149      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
150 
151   if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
152       || (cbranch_jump_edge->flags & EDGE_CROSSING))
153     return false;
154 
155   /* The conditional branch must target the block after the
156      unconditional branch.  */
157   cbranch_dest_block = cbranch_jump_edge->dest;
158 
159   if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
160       || jump_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
161       || !can_fallthru (jump_block, cbranch_dest_block))
162     return false;
163 
164   /* Invert the conditional branch.  */
165   if (!invert_jump (as_a <rtx_jump_insn *> (cbranch_insn),
166 		    block_label (jump_dest_block), 0))
167     return false;
168 
169   if (dump_file)
170     fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
171 	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
172 
173   /* Success.  Update the CFG to match.  Note that after this point
174      the edge variable names appear backwards; the redirection is done
175      this way to preserve edge profile data.  */
176   cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
177 						cbranch_dest_block);
178   cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
179 						    jump_dest_block);
180   cbranch_jump_edge->flags |= EDGE_FALLTHRU;
181   cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
182   update_br_prob_note (cbranch_block);
183 
184   /* Delete the block with the unconditional jump, and clean up the mess.  */
185   delete_basic_block (jump_block);
186   tidy_fallthru_edge (cbranch_jump_edge);
187   update_forwarder_flag (cbranch_block);
188 
189   return true;
190 }
191 
192 /* Attempt to prove that the operation is a NOOP using CSElib, or mark the
193    effect on the register.  Used by jump threading.  */
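/* A false return value means the effect was recorded in NONEQUAL (or could
   safely be ignored); a true return value means the effect could not be
   described, e.g. a SET whose destination is not a register, and the caller
   must give up on threading.  */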
194 
195 static bool
196 mark_effect (rtx exp, regset nonequal)
197 {
198   rtx dest;
199   switch (GET_CODE (exp))
200     {
201       /* In case we do clobber the register, mark it as equal, as we know the
202 	 value is dead so it doesn't have to match.  */
203     case CLOBBER:
204       dest = XEXP (exp, 0);
205       if (REG_P (dest))
206 	bitmap_clear_range (nonequal, REGNO (dest), REG_NREGS (dest));
207       return false;
208 
209     case SET:
210       if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
211 	return false;
212       dest = SET_DEST (exp);
213       if (dest == pc_rtx)
214 	return false;
215       if (!REG_P (dest))
216 	return true;
217       bitmap_set_range (nonequal, REGNO (dest), REG_NREGS (dest));
218       return false;
219 
220     default:
221       return false;
222     }
223 }
224 
225 /* Return true if X contains a register in NONEQUAL.  */
226 static bool
227 mentions_nonequal_regs (const_rtx x, regset nonequal)
228 {
229   subrtx_iterator::array_type array;
230   FOR_EACH_SUBRTX (iter, array, x, NONCONST)
231     {
232       const_rtx x = *iter;
233       if (REG_P (x))
234 	{
235 	  unsigned int end_regno = END_REGNO (x);
236 	  for (unsigned int regno = REGNO (x); regno < end_regno; ++regno)
237 	    if (REGNO_REG_SET_P (nonequal, regno))
238 	      return true;
239 	}
240     }
241   return false;
242 }
243 
244 /* Attempt to prove that the basic block B will have no side effects and
245    always continues along the same edge if reached via E.  Return that edge
246    if it exists, NULL otherwise.  */
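/* The idea: B must have no side effects, must leave the registers used by
   its final condition unchanged, and the condition tested at the end of
   E->src must dominate (directly or in reversed form) the condition tested
   at the end of B.  Then the outcome of B's branch is already known when B
   is entered via E, and the jump can be threaded straight to the
   corresponding successor of B.  */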
247 
248 static edge
249 thread_jump (edge e, basic_block b)
250 {
251   rtx set1, set2, cond1, cond2;
252   rtx_insn *insn;
253   enum rtx_code code1, code2, reversed_code2;
254   bool reverse1 = false;
255   unsigned i;
256   regset nonequal;
257   bool failed = false;
258   reg_set_iterator rsi;
259 
260   if (b->flags & BB_NONTHREADABLE_BLOCK)
261     return NULL;
262 
263   /* At the moment, we handle only conditional jumps, but later we may
264      want to extend this code to tablejumps and others.  */
265   if (EDGE_COUNT (e->src->succs) != 2)
266     return NULL;
267   if (EDGE_COUNT (b->succs) != 2)
268     {
269       b->flags |= BB_NONTHREADABLE_BLOCK;
270       return NULL;
271     }
272 
273   /* The second branch must end with an onlyjump, as we will eliminate the jump.  */
274   if (!any_condjump_p (BB_END (e->src)))
275     return NULL;
276 
277   if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
278     {
279       b->flags |= BB_NONTHREADABLE_BLOCK;
280       return NULL;
281     }
282 
283   set1 = pc_set (BB_END (e->src));
284   set2 = pc_set (BB_END (b));
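  /* REVERSE1 is set when following edge E means the condition tested in
     E->src evaluated to false; in that case the reversed comparison code
     must be used for the dominance check below.  */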
285   if (((e->flags & EDGE_FALLTHRU) != 0)
286       != (XEXP (SET_SRC (set1), 1) == pc_rtx))
287     reverse1 = true;
288 
289   cond1 = XEXP (SET_SRC (set1), 0);
290   cond2 = XEXP (SET_SRC (set2), 0);
291   if (reverse1)
292     code1 = reversed_comparison_code (cond1, BB_END (e->src));
293   else
294     code1 = GET_CODE (cond1);
295 
296   code2 = GET_CODE (cond2);
297   reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
298 
299   if (!comparison_dominates_p (code1, code2)
300       && !comparison_dominates_p (code1, reversed_code2))
301     return NULL;
302 
303   /* Ensure that the comparison operators are equivalent.
304      ??? This is far too pessimistic.  We should allow swapped operands,
305      different CCmodes, or, for example, interval comparisons that
306      dominate even when the operands are not equivalent.  */
307   if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
308       || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
309     return NULL;
310 
311   /* Short circuit cases where block B contains some side effects, as we can't
312      safely bypass it.  */
313   for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
314        insn = NEXT_INSN (insn))
315     if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
316       {
317 	b->flags |= BB_NONTHREADABLE_BLOCK;
318 	return NULL;
319       }
320 
321   cselib_init (0);
322 
323   /* First process all values computed in the source basic block.  */
324   for (insn = NEXT_INSN (BB_HEAD (e->src));
325        insn != NEXT_INSN (BB_END (e->src));
326        insn = NEXT_INSN (insn))
327     if (INSN_P (insn))
328       cselib_process_insn (insn);
329 
330   nonequal = BITMAP_ALLOC (NULL);
331   CLEAR_REG_SET (nonequal);
332 
333   /* Now assume that we've continued along the edge E to B and continue
334      processing as if it were the same basic block.
335      Our goal is to prove that the whole block is a NOOP.  */
336 
337   for (insn = NEXT_INSN (BB_HEAD (b));
338        insn != NEXT_INSN (BB_END (b)) && !failed;
339        insn = NEXT_INSN (insn))
340     {
341       if (INSN_P (insn))
342 	{
343 	  rtx pat = PATTERN (insn);
344 
345 	  if (GET_CODE (pat) == PARALLEL)
346 	    {
347 	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
348 		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
349 	    }
350 	  else
351 	    failed |= mark_effect (pat, nonequal);
352 	}
353 
354       cselib_process_insn (insn);
355     }
356 
357   /* Later we should clear nonequal of dead registers.  So far we don't
358      have liveness information in cfg_cleanup.  */
359   if (failed)
360     {
361       b->flags |= BB_NONTHREADABLE_BLOCK;
362       goto failed_exit;
363     }
364 
365   /* cond2 must not mention any register whose value is not known to be
366      equal to its value in the former block.  */
367   if (mentions_nonequal_regs (cond2, nonequal))
368     goto failed_exit;
369 
370   EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
371     goto failed_exit;
372 
373   BITMAP_FREE (nonequal);
374   cselib_finish ();
375   if ((comparison_dominates_p (code1, code2) != 0)
376       != (XEXP (SET_SRC (set2), 1) == pc_rtx))
377     return BRANCH_EDGE (b);
378   else
379     return FALLTHRU_EDGE (b);
380 
381 failed_exit:
382   BITMAP_FREE (nonequal);
383   cselib_finish ();
384   return NULL;
385 }
386 
387 /* Attempt to forward edges leaving basic block B.
388    Return true if successful.  */
389 
390 static bool
391 try_forward_edges (int mode, basic_block b)
392 {
393   bool changed = false;
394   edge_iterator ei;
395   edge e, *threaded_edges = NULL;
396 
397   for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
398     {
399       basic_block target, first;
400       location_t goto_locus;
401       int counter;
402       bool threaded = false;
403       int nthreaded_edges = 0;
404       bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;
405       bool new_target_threaded = false;
406 
407       /* Skip complex edges because we don't know how to update them.
408 
409 	 Still handle fallthru edges, as we may succeed in forwarding the
410 	 fallthru edge to the same place as the branch edge of a conditional
411 	 branch and turn the conditional branch into an unconditional one.  */
412       if (e->flags & EDGE_COMPLEX)
413 	{
414 	  ei_next (&ei);
415 	  continue;
416 	}
417 
418       target = first = e->dest;
419       counter = NUM_FIXED_BLOCKS;
420       goto_locus = e->goto_locus;
421 
422       while (counter < n_basic_blocks_for_fn (cfun))
423 	{
424 	  basic_block new_target = NULL;
425 	  may_thread |= (target->flags & BB_MODIFIED) != 0;
426 
427 	  if (FORWARDER_BLOCK_P (target)
428 	      && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun))
429 	    {
430 	      /* Bypass trivial infinite loops.  */
431 	      new_target = single_succ (target);
432 	      if (target == new_target)
433 		counter = n_basic_blocks_for_fn (cfun);
434 	      else if (!optimize)
435 		{
436 		  /* When not optimizing, ensure that edges or forwarder
437 		     blocks with different locus are not optimized out.  */
438 		  location_t new_locus = single_succ_edge (target)->goto_locus;
439 		  location_t locus = goto_locus;
440 
441 		  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
442 		      && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
443 		      && new_locus != locus)
444 		    new_target = NULL;
445 		  else
446 		    {
447 		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
448 			locus = new_locus;
449 
450 		      rtx_insn *last = BB_END (target);
451 		      if (DEBUG_INSN_P (last))
452 			last = prev_nondebug_insn (last);
453 		      if (last && INSN_P (last))
454 			new_locus = INSN_LOCATION (last);
455 		      else
456 			new_locus = UNKNOWN_LOCATION;
457 
458 		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
459 			  && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
460 			  && new_locus != locus)
461 			new_target = NULL;
462 		      else
463 			{
464 			  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
465 			    locus = new_locus;
466 
467 			  goto_locus = locus;
468 			}
469 		    }
470 		}
471 	    }
472 
473 	  /* Allow threading only over one edge at a time to simplify updating
474 	     of probabilities.  */
475 	  else if ((mode & CLEANUP_THREADING) && may_thread)
476 	    {
477 	      edge t = thread_jump (e, target);
478 	      if (t)
479 		{
480 		  if (!threaded_edges)
481 		    threaded_edges = XNEWVEC (edge,
482 					      n_basic_blocks_for_fn (cfun));
483 		  else
484 		    {
485 		      int i;
486 
487 		      /* Detect an infinite loop across blocks not
488 			 including the start block.  */
489 		      for (i = 0; i < nthreaded_edges; ++i)
490 			if (threaded_edges[i] == t)
491 			  break;
492 		      if (i < nthreaded_edges)
493 			{
494 			  counter = n_basic_blocks_for_fn (cfun);
495 			  break;
496 			}
497 		    }
498 
499 		  /* Detect an infinite loop across the start block.  */
500 		  if (t->dest == b)
501 		    break;
502 
503 		  gcc_assert (nthreaded_edges
504 			      < (n_basic_blocks_for_fn (cfun)
505 				 - NUM_FIXED_BLOCKS));
506 		  threaded_edges[nthreaded_edges++] = t;
507 
508 		  new_target = t->dest;
509 		  new_target_threaded = true;
510 		}
511 	    }
512 
513 	  if (!new_target)
514 	    break;
515 
516 	  counter++;
517 	  /* Do not turn a non-crossing jump into a crossing one.  Depending on
518 	     the target, it may require a different instruction pattern.  */
519 	  if ((e->flags & EDGE_CROSSING)
520 	      || BB_PARTITION (first) == BB_PARTITION (new_target))
521 	    {
522 	      target = new_target;
523 	      threaded |= new_target_threaded;
524 	    }
525 	}
526 
527       if (counter >= n_basic_blocks_for_fn (cfun))
528 	{
529 	  if (dump_file)
530 	    fprintf (dump_file, "Infinite loop in BB %i.\n",
531 		     target->index);
532 	}
533       else if (target == first)
534 	; /* We didn't do anything.  */
535       else
536 	{
537 	  /* Save the values now, as the edge may get removed.  */
538 	  profile_count edge_count = e->count ();
539 	  int n = 0;
540 
541 	  e->goto_locus = goto_locus;
542 
543 	  /* Don't force if target is exit block.  */
544 	  if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun))
545 	    {
546 	      notice_new_block (redirect_edge_and_branch_force (e, target));
547 	      if (dump_file)
548 		fprintf (dump_file, "Conditionals threaded.\n");
549 	    }
550 	  else if (!redirect_edge_and_branch (e, target))
551 	    {
552 	      if (dump_file)
553 		fprintf (dump_file,
554 			 "Forwarding edge %i->%i to %i failed.\n",
555 			 b->index, e->dest->index, target->index);
556 	      ei_next (&ei);
557 	      continue;
558 	    }
559 
560 	  /* We successfully forwarded the edge.  Now update profile
561 	     data: for each edge we traversed in the chain, remove
562 	     the original edge's execution count.  */
563 	  do
564 	    {
565 	      edge t;
566 
567 	      if (!single_succ_p (first))
568 		{
569 		  gcc_assert (n < nthreaded_edges);
570 		  t = threaded_edges [n++];
571 		  gcc_assert (t->src == first);
572 		  update_bb_profile_for_threading (first, edge_count, t);
573 		  update_br_prob_note (first);
574 		}
575 	      else
576 		{
577 		  first->count -= edge_count;
578 		  /* It is possible that, as a result of
579 		     threading, we've removed the edge because it
580 		     was threaded to the fallthru edge.  Avoid
581 		     getting out of sync.  */
582 		  if (n < nthreaded_edges
583 		      && first == threaded_edges [n]->src)
584 		    n++;
585 		  t = single_succ_edge (first);
586 		}
587 
588 	      first = t->dest;
589 	    }
590 	  while (first != target);
591 
592 	  changed = true;
593 	  continue;
594 	}
595       ei_next (&ei);
596     }
597 
598   free (threaded_edges);
599   return changed;
600 }
601 
602 
603 /* Blocks A and B are to be merged into a single block.  A has no incoming
604    fallthru edge, so it can be moved before B without adding or modifying
605    any jumps (aside from the jump from A to B).  */
606 
607 static void
608 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
609 {
610   rtx_insn *barrier;
611 
612   /* If we are partitioning hot/cold basic blocks, we don't want to
613      mess up unconditional or indirect jumps that cross between hot
614      and cold sections.
615 
616      Basic block partitioning may result in some jumps that appear to
617      be optimizable (or blocks that appear to be mergeable), but which really
618      must be left untouched (they are required to make it safely across
619      partition boundaries).  See the comments at the top of
620      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
621 
622   if (BB_PARTITION (a) != BB_PARTITION (b))
623     return;
624 
625   barrier = next_nonnote_insn (BB_END (a));
626   gcc_assert (BARRIER_P (barrier));
627   delete_insn (barrier);
628 
629   /* Scramble the insn chain.  */
630   if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
631     reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
632   df_set_bb_dirty (a);
633 
634   if (dump_file)
635     fprintf (dump_file, "Moved block %d before %d and merged.\n",
636 	     a->index, b->index);
637 
638   /* Swap the records for the two blocks around.  */
639 
640   unlink_block (a);
641   link_block (a, b->prev_bb);
642 
643   /* Now blocks A and B are contiguous.  Merge them.  */
644   merge_blocks (a, b);
645 }
646 
647 /* Blocks A and B are to be merged into a single block.  B has no outgoing
648    fallthru edge, so it can be moved after A without adding or modifying
649    any jumps (aside from the jump from A to B).  */
650 
651 static void
652 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
653 {
654   rtx_insn *barrier, *real_b_end;
655   rtx_insn *label;
656   rtx_jump_table_data *table;
657 
658   /* If we are partitioning hot/cold basic blocks, we don't want to
659      mess up unconditional or indirect jumps that cross between hot
660      and cold sections.
661 
662      Basic block partitioning may result in some jumps that appear to
663      be optimizable (or blocks that appear to be mergeable), but which really
664      must be left untouched (they are required to make it safely across
665      partition boundaries).  See the comments at the top of
666      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
667 
668   if (BB_PARTITION (a) != BB_PARTITION (b))
669     return;
670 
671   real_b_end = BB_END (b);
672 
673   /* If there is a jump table following block B, temporarily add the jump
674      table to block B so that it will also be moved to the correct location.  */
675   if (tablejump_p (BB_END (b), &label, &table)
676       && prev_active_insn (label) == BB_END (b))
677     {
678       BB_END (b) = table;
679     }
680 
681   /* There had better have been a barrier there.  Delete it.  */
682   barrier = NEXT_INSN (BB_END (b));
683   if (barrier && BARRIER_P (barrier))
684     delete_insn (barrier);
685 
686 
687   /* Scramble the insn chain.  */
688   reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
689 
690   /* Restore the real end of b.  */
691   BB_END (b) = real_b_end;
692 
693   if (dump_file)
694     fprintf (dump_file, "Moved block %d after %d and merged.\n",
695 	     b->index, a->index);
696 
697   /* Now blocks A and B are contiguous.  Merge them.  */
698   merge_blocks (a, b);
699 }
700 
701 /* Attempt to merge basic blocks that are potentially non-adjacent.
702    Return NULL iff the attempt failed, otherwise return basic block
703    where cleanup_cfg should continue.  Because the merging commonly
704    moves basic block away or introduces another optimization
705    possibility, return basic block just before B so cleanup_cfg don't
706    need to iterate.
707 
708    It may be good idea to return basic block before C in the case
709    C has been moved after B and originally appeared earlier in the
710    insn sequence, but we have no information available about the
711    relative ordering of these two.  Hopefully it is not too common.  */
712 
713 static basic_block
714 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
715 {
716   basic_block next;
717 
718   /* If we are partitioning hot/cold basic blocks, we don't want to
719      mess up unconditional or indirect jumps that cross between hot
720      and cold sections.
721 
722      Basic block partitioning may result in some jumps that appear to
723      be optimizable (or blocks that appear to be mergeable), but which really
724      must be left untouched (they are required to make it safely across
725      partition boundaries).  See the comments at the top of
726      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
727 
728   if (BB_PARTITION (b) != BB_PARTITION (c))
729     return NULL;
730 
731   /* If B has a fallthru edge to C, no need to move anything.  */
732   if (e->flags & EDGE_FALLTHRU)
733     {
734       int b_index = b->index, c_index = c->index;
735 
736       /* Protect the loop latches.  */
737       if (current_loops && c->loop_father->latch == c)
738 	return NULL;
739 
740       merge_blocks (b, c);
741       update_forwarder_flag (b);
742 
743       if (dump_file)
744 	fprintf (dump_file, "Merged %d and %d without moving.\n",
745 		 b_index, c_index);
746 
747       return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb;
748     }
749 
750   /* Otherwise we will need to move code around.  Do that only if expensive
751      transformations are allowed.  */
752   else if (mode & CLEANUP_EXPENSIVE)
753     {
754       edge tmp_edge, b_fallthru_edge;
755       bool c_has_outgoing_fallthru;
756       bool b_has_incoming_fallthru;
757 
758       /* Avoid overactive code motion, as the forwarder blocks should be
759 	 eliminated by edge redirection instead.  One exception might have
760 	 been if B is a forwarder block and C has no fallthru edge, but
761 	 that should be cleaned up by bb-reorder instead.  */
762       if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
763 	return NULL;
764 
765       /* We must make sure to not munge nesting of lexical blocks,
766 	 and loop notes.  This is done by squeezing out all the notes
767 	 and leaving them there to lie.  Not ideal, but functional.  */
768 
769       tmp_edge = find_fallthru_edge (c->succs);
770       c_has_outgoing_fallthru = (tmp_edge != NULL);
771 
772       tmp_edge = find_fallthru_edge (b->preds);
773       b_has_incoming_fallthru = (tmp_edge != NULL);
774       b_fallthru_edge = tmp_edge;
775       next = b->prev_bb;
776       if (next == c)
777 	next = next->prev_bb;
778 
779       /* Otherwise, we're going to try to move C after B.  If C does
780 	 not have an outgoing fallthru, then it can be moved
781 	 immediately after B without introducing or modifying jumps.  */
782       if (! c_has_outgoing_fallthru)
783 	{
784 	  merge_blocks_move_successor_nojumps (b, c);
785 	  return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
786 	}
787 
788       /* If B does not have an incoming fallthru, then it can be moved
789 	 immediately before C without introducing or modifying jumps.
790 	 C cannot be the first block, so we do not have to worry about
791 	 accessing a non-existent block.  */
792 
793       if (b_has_incoming_fallthru)
794 	{
795 	  basic_block bb;
796 
797 	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
798 	    return NULL;
799 	  bb = force_nonfallthru (b_fallthru_edge);
800 	  if (bb)
801 	    notice_new_block (bb);
802 	}
803 
804       merge_blocks_move_predecessor_nojumps (b, c);
805       return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
806     }
807 
808   return NULL;
809 }
810 
811 
812 /* Remove the memory attributes of corresponding MEM expressions in X and Y
813    if they are not equal.  */
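/* This is used when two insns from different blocks are about to be merged
   by crossjumping: a MEM in one insn may carry alias set, expression, offset,
   size or alignment information that is not valid for the corresponding MEM
   in the other, so the attributes are conservatively weakened to what holds
   for both.  */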
814 
815 static void
816 merge_memattrs (rtx x, rtx y)
817 {
818   int i;
819   int j;
820   enum rtx_code code;
821   const char *fmt;
822 
823   if (x == y)
824     return;
825   if (x == 0 || y == 0)
826     return;
827 
828   code = GET_CODE (x);
829 
830   if (code != GET_CODE (y))
831     return;
832 
833   if (GET_MODE (x) != GET_MODE (y))
834     return;
835 
836   if (code == MEM && !mem_attrs_eq_p (MEM_ATTRS (x), MEM_ATTRS (y)))
837     {
838       if (! MEM_ATTRS (x))
839 	MEM_ATTRS (y) = 0;
840       else if (! MEM_ATTRS (y))
841 	MEM_ATTRS (x) = 0;
842       else
843 	{
844 	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
845 	    {
846 	      set_mem_alias_set (x, 0);
847 	      set_mem_alias_set (y, 0);
848 	    }
849 
850 	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
851 	    {
852 	      set_mem_expr (x, 0);
853 	      set_mem_expr (y, 0);
854 	      clear_mem_offset (x);
855 	      clear_mem_offset (y);
856 	    }
857 	  else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
858 		   || (MEM_OFFSET_KNOWN_P (x)
859 		       && maybe_ne (MEM_OFFSET (x), MEM_OFFSET (y))))
860 	    {
861 	      clear_mem_offset (x);
862 	      clear_mem_offset (y);
863 	    }
864 
865 	  if (!MEM_SIZE_KNOWN_P (x))
866 	    clear_mem_size (y);
867 	  else if (!MEM_SIZE_KNOWN_P (y))
868 	    clear_mem_size (x);
869 	  else if (known_le (MEM_SIZE (x), MEM_SIZE (y)))
870 	    set_mem_size (x, MEM_SIZE (y));
871 	  else if (known_le (MEM_SIZE (y), MEM_SIZE (x)))
872 	    set_mem_size (y, MEM_SIZE (x));
873 	  else
874 	    {
875 	      /* The sizes aren't ordered, so we can't merge them.  */
876 	      clear_mem_size (x);
877 	      clear_mem_size (y);
878 	    }
879 
880 	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
881 	  set_mem_align (y, MEM_ALIGN (x));
882 	}
883     }
884   if (code == MEM)
885     {
886       if (MEM_READONLY_P (x) != MEM_READONLY_P (y))
887 	{
888 	  MEM_READONLY_P (x) = 0;
889 	  MEM_READONLY_P (y) = 0;
890 	}
891       if (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y))
892 	{
893 	  MEM_NOTRAP_P (x) = 0;
894 	  MEM_NOTRAP_P (y) = 0;
895 	}
896       if (MEM_VOLATILE_P (x) != MEM_VOLATILE_P (y))
897 	{
898 	  MEM_VOLATILE_P (x) = 1;
899 	  MEM_VOLATILE_P (y) = 1;
900 	}
901     }
902 
903   fmt = GET_RTX_FORMAT (code);
904   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
905     {
906       switch (fmt[i])
907 	{
908 	case 'E':
909 	  /* Two vectors must have the same length.  */
910 	  if (XVECLEN (x, i) != XVECLEN (y, i))
911 	    return;
912 
913 	  for (j = 0; j < XVECLEN (x, i); j++)
914 	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
915 
916 	  break;
917 
918 	case 'e':
919 	  merge_memattrs (XEXP (x, i), XEXP (y, i));
920 	}
921     }
922   return;
923 }
924 
925 
926  /* Checks if patterns P1 and P2 are equivalent, apart from the possibly
927     different single sets S1 and S2.  */
928 
929 static bool
930 equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
931 {
932   int i;
933   rtx e1, e2;
934 
935   if (p1 == s1 && p2 == s2)
936     return true;
937 
938   if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
939     return false;
940 
941   if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
942     return false;
943 
944   for (i = 0; i < XVECLEN (p1, 0); i++)
945     {
946       e1 = XVECEXP (p1, 0, i);
947       e2 = XVECEXP (p2, 0, i);
948       if (e1 == s1 && e2 == s2)
949         continue;
950       if (reload_completed
951           ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
952         continue;
953 
954       return false;
955     }
956 
957   return true;
958 }
959 
960 
961 /* NOTE1 is the REG_EQUAL note, if any, attached to an insn
962    that is a single_set with a SET_SRC of SRC1.  Similarly
963    for NOTE2/SRC2.
964 
965    So effectively NOTE1/NOTE2 are an alternate form of
966    SRC1/SRC2 respectively.
967 
968    Return nonzero if SRC1 or NOTE1 has the same constant
969    integer value as SRC2 or NOTE2.   Else return zero.  */
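/* For example, an insn (set (reg X) (reg Y)) carrying a REG_EQUAL note
   (const_int 5) is considered equal in value to an insn
   (set (reg X) (const_int 5)) that has no note.  */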
970 static int
971 values_equal_p (rtx note1, rtx note2, rtx src1, rtx src2)
972 {
973   if (note1
974       && note2
975       && CONST_INT_P (XEXP (note1, 0))
976       && rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0)))
977     return 1;
978 
979   if (!note1
980       && !note2
981       && CONST_INT_P (src1)
982       && CONST_INT_P (src2)
983       && rtx_equal_p (src1, src2))
984     return 1;
985 
986   if (note1
987       && CONST_INT_P (src2)
988       && rtx_equal_p (XEXP (note1, 0), src2))
989     return 1;
990 
991   if (note2
992       && CONST_INT_P (src1)
993       && rtx_equal_p (XEXP (note2, 0), src1))
994     return 1;
995 
996   return 0;
997 }
998 
999 /* Examine register notes on I1 and I2 and return:
1000    - dir_forward if I1 can be replaced by I2, or
1001    - dir_backward if I2 can be replaced by I1, or
1002    - dir_both if both are the case.  */
1003 
1004 static enum replace_direction
1005 can_replace_by (rtx_insn *i1, rtx_insn *i2)
1006 {
1007   rtx s1, s2, d1, d2, src1, src2, note1, note2;
1008   bool c1, c2;
1009 
1010   /* Check for 2 sets.  */
1011   s1 = single_set (i1);
1012   s2 = single_set (i2);
1013   if (s1 == NULL_RTX || s2 == NULL_RTX)
1014     return dir_none;
1015 
1016   /* Check that the 2 sets set the same dest.  */
1017   d1 = SET_DEST (s1);
1018   d2 = SET_DEST (s2);
1019   if (!(reload_completed
1020         ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
1021     return dir_none;
1022 
1023   /* Find identical reg_equiv or reg_equal note, which implies that the 2 sets
1024      set dest to the same value.  */
1025   note1 = find_reg_equal_equiv_note (i1);
1026   note2 = find_reg_equal_equiv_note (i2);
1027 
1028   src1 = SET_SRC (s1);
1029   src2 = SET_SRC (s2);
1030 
1031   if (!values_equal_p (note1, note2, src1, src2))
1032     return dir_none;
1033 
1034   if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
1035     return dir_none;
1036 
1037   /* Although the 2 sets set dest to the same value, we cannot replace
1038        (set (dest) (const_int))
1039      by
1040        (set (dest) (reg))
1041      because we don't know if the reg is live and has the same value at the
1042      location of replacement.  */
1043   c1 = CONST_INT_P (src1);
1044   c2 = CONST_INT_P (src2);
1045   if (c1 && c2)
1046     return dir_both;
1047   else if (c2)
1048     return dir_forward;
1049   else if (c1)
1050     return dir_backward;
1051 
1052   return dir_none;
1053 }
1054 
1055 /* Merges directions A and B.  */
1056 
1057 static enum replace_direction
1058 merge_dir (enum replace_direction a, enum replace_direction b)
1059 {
1060   /* Implements the following table:
1061         |bo fw bw no
1062      ---+-----------
1063      bo |bo fw bw no
1064      fw |-- fw no no
1065      bw |-- -- bw no
1066      no |-- -- -- no.  */
1067 
1068   if (a == b)
1069     return a;
1070 
1071   if (a == dir_both)
1072     return b;
1073   if (b == dir_both)
1074     return a;
1075 
1076   return dir_none;
1077 }
1078 
1079 /* Array of flags indexed by reg note kind, true if the given
1080    reg note is CFA related.  */
1081 static const bool reg_note_cfa_p[] = {
1082 #undef REG_CFA_NOTE
1083 #define DEF_REG_NOTE(NAME) false,
1084 #define REG_CFA_NOTE(NAME) true,
1085 #include "reg-notes.def"
1086 #undef REG_CFA_NOTE
1087 #undef DEF_REG_NOTE
1088   false
1089 };
1090 
1091 /* Return true if I1 and I2 have identical CFA notes (the same order
1092    and equivalent content).  */
1093 
1094 static bool
1095 insns_have_identical_cfa_notes (rtx_insn *i1, rtx_insn *i2)
1096 {
1097   rtx n1, n2;
1098   for (n1 = REG_NOTES (i1), n2 = REG_NOTES (i2); ;
1099        n1 = XEXP (n1, 1), n2 = XEXP (n2, 1))
1100     {
1101       /* Skip over reg notes not related to CFI information.  */
1102       while (n1 && !reg_note_cfa_p[REG_NOTE_KIND (n1)])
1103 	n1 = XEXP (n1, 1);
1104       while (n2 && !reg_note_cfa_p[REG_NOTE_KIND (n2)])
1105 	n2 = XEXP (n2, 1);
1106       if (n1 == NULL_RTX && n2 == NULL_RTX)
1107 	return true;
1108       if (n1 == NULL_RTX || n2 == NULL_RTX)
1109 	return false;
1110       if (XEXP (n1, 0) == XEXP (n2, 0))
1111 	;
1112       else if (XEXP (n1, 0) == NULL_RTX || XEXP (n2, 0) == NULL_RTX)
1113 	return false;
1114       else if (!(reload_completed
1115 		 ? rtx_renumbered_equal_p (XEXP (n1, 0), XEXP (n2, 0))
1116 		 : rtx_equal_p (XEXP (n1, 0), XEXP (n2, 0))))
1117 	return false;
1118     }
1119 }
1120 
1121 /* Examine I1 and I2 and return:
1122    - dir_forward if I1 can be replaced by I2, or
1123    - dir_backward if I2 can be replaced by I1, or
1124    - dir_both if both are the case.  */
1125 
1126 static enum replace_direction
1127 old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx_insn *i1, rtx_insn *i2)
1128 {
1129   rtx p1, p2;
1130 
1131   /* Verify that I1 and I2 are equivalent.  */
1132   if (GET_CODE (i1) != GET_CODE (i2))
1133     return dir_none;
1134 
1135   /* __builtin_unreachable() may lead to empty blocks (ending with
1136      NOTE_INSN_BASIC_BLOCK).  They may be crossjumped. */
1137   if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
1138     return dir_both;
1139 
1140   /* ??? Do not allow cross-jumping between different stack levels.  */
1141   p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
1142   p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
1143   if (p1 && p2)
1144     {
1145       p1 = XEXP (p1, 0);
1146       p2 = XEXP (p2, 0);
1147       if (!rtx_equal_p (p1, p2))
1148         return dir_none;
1149 
1150       /* ??? Worse, this adjustment had better be constant lest we
1151          have differing incoming stack levels.  */
1152       if (!frame_pointer_needed
1153 	  && known_eq (find_args_size_adjust (i1), HOST_WIDE_INT_MIN))
1154 	return dir_none;
1155     }
1156   else if (p1 || p2)
1157     return dir_none;
1158 
1159   /* Do not allow cross-jumping between frame related insns and other
1160      insns.  */
1161   if (RTX_FRAME_RELATED_P (i1) != RTX_FRAME_RELATED_P (i2))
1162     return dir_none;
1163 
1164   p1 = PATTERN (i1);
1165   p2 = PATTERN (i2);
1166 
1167   if (GET_CODE (p1) != GET_CODE (p2))
1168     return dir_none;
1169 
1170   /* If this is a CALL_INSN, compare register usage information.
1171      If we don't check this on stack register machines, the two
1172      CALL_INSNs might be merged leaving reg-stack.c with mismatching
1173      numbers of stack registers in the same basic block.
1174      If we don't check this on machines with delay slots, a delay slot may
1175      be filled that clobbers a parameter expected by the subroutine.
1176 
1177      ??? We take the simple route for now and assume that if they're
1178      equal, they were constructed identically.
1179 
1180      Also check for identical exception regions.  */
1181 
1182   if (CALL_P (i1))
1183     {
1184       /* Ensure the same EH region.  */
1185       rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
1186       rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);
1187 
1188       if (!n1 && n2)
1189 	return dir_none;
1190 
1191       if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1192 	return dir_none;
1193 
1194       if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
1195 			CALL_INSN_FUNCTION_USAGE (i2))
1196 	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
1197 	return dir_none;
1198 
1199       /* For address sanitizer, never crossjump __asan_report_* builtins,
1200 	 otherwise errors might be reported on incorrect lines.  */
1201       if (flag_sanitize & SANITIZE_ADDRESS)
1202 	{
1203 	  rtx call = get_call_rtx_from (i1);
1204 	  if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
1205 	    {
1206 	      rtx symbol = XEXP (XEXP (call, 0), 0);
1207 	      if (SYMBOL_REF_DECL (symbol)
1208 		  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
1209 		{
1210 		  if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
1211 		       == BUILT_IN_NORMAL)
1212 		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
1213 			 >= BUILT_IN_ASAN_REPORT_LOAD1
1214 		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
1215 			 <= BUILT_IN_ASAN_STOREN)
1216 		    return dir_none;
1217 		}
1218 	    }
1219 	}
1220     }
1221 
1222   /* If both i1 and i2 are frame related, verify all the CFA notes
1223      in the same order and with the same content.  */
1224   if (RTX_FRAME_RELATED_P (i1) && !insns_have_identical_cfa_notes (i1, i2))
1225     return dir_none;
1226 
1227 #ifdef STACK_REGS
1228   /* If cross_jump_death_matters is not 0, the insn's mode
1229      indicates whether or not the insn contains any stack-like
1230      regs.  */
1231 
1232   if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1233     {
1234       /* If register stack conversion has already been done, then
1235 	 death notes must also be compared before it is certain that
1236 	 the two instruction streams match.  */
1237 
1238       rtx note;
1239       HARD_REG_SET i1_regset, i2_regset;
1240 
1241       CLEAR_HARD_REG_SET (i1_regset);
1242       CLEAR_HARD_REG_SET (i2_regset);
1243 
1244       for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1245 	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1246 	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1247 
1248       for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1249 	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1250 	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1251 
1252       if (!hard_reg_set_equal_p (i1_regset, i2_regset))
1253 	return dir_none;
1254     }
1255 #endif
1256 
1257   if (reload_completed
1258       ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1259     return dir_both;
1260 
1261   return can_replace_by (i1, i2);
1262 }
1263 
1264 /* When comparing insns I1 and I2 in flow_find_cross_jump or
1265    flow_find_head_matching_sequence, ensure the notes match.  */
1266 
1267 static void
1268 merge_notes (rtx_insn *i1, rtx_insn *i2)
1269 {
1270   /* If the merged insns have different REG_EQUAL notes, then
1271      remove them.  */
1272   rtx equiv1 = find_reg_equal_equiv_note (i1);
1273   rtx equiv2 = find_reg_equal_equiv_note (i2);
1274 
1275   if (equiv1 && !equiv2)
1276     remove_note (i1, equiv1);
1277   else if (!equiv1 && equiv2)
1278     remove_note (i2, equiv2);
1279   else if (equiv1 && equiv2
1280 	   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1281     {
1282       remove_note (i1, equiv1);
1283       remove_note (i2, equiv2);
1284     }
1285 }
1286 
1287  /* Walks from I1 in BB1 backward till the next non-debug insn, and returns the
1288     resulting insn in I1, and the corresponding bb in BB1.  At the head of a
1289     bb, if there is a predecessor bb that reaches this bb via fallthru, and
1290     FOLLOW_FALLTHRU, walks further in the predecessor bb and registers this in
1291     DID_FALLTHRU.  Otherwise, stops at the head of the bb.  */
1292 
1293 static void
1294 walk_to_nondebug_insn (rtx_insn **i1, basic_block *bb1, bool follow_fallthru,
1295                        bool *did_fallthru)
1296 {
1297   edge fallthru;
1298 
1299   *did_fallthru = false;
1300 
1301   /* Skip notes and debug insns.  */
1302   while (!NONDEBUG_INSN_P (*i1))
1303     {
1304       if (*i1 != BB_HEAD (*bb1))
1305         {
1306           *i1 = PREV_INSN (*i1);
1307           continue;
1308         }
1309 
1310       if (!follow_fallthru)
1311         return;
1312 
1313       fallthru = find_fallthru_edge ((*bb1)->preds);
1314       if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1315           || !single_succ_p (fallthru->src))
1316         return;
1317 
1318       *bb1 = fallthru->src;
1319       *i1 = BB_END (*bb1);
1320       *did_fallthru = true;
1321      }
1322 }
1323 
1324 /* Look through the insns at the end of BB1 and BB2 and find the longest
1325    sequence that are either equivalent, or allow forward or backward
1326    replacement.  Store the first insns for that sequence in *F1 and *F2 and
1327    return the sequence length.
1328 
1329    DIR_P indicates the allowed replacement direction on function entry, and
1330    the actual replacement direction on function exit.  If NULL, only equivalent
1331    sequences are allowed.
1332 
1333    To simplify callers of this function, if the blocks match exactly,
1334    store the head of the blocks in *F1 and *F2.  */
1335 
1336 int
1337 flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx_insn **f1,
1338 		      rtx_insn **f2, enum replace_direction *dir_p)
1339 {
1340   rtx_insn *i1, *i2, *last1, *last2, *afterlast1, *afterlast2;
1341   int ninsns = 0;
1342   enum replace_direction dir, last_dir, afterlast_dir;
1343   bool follow_fallthru, did_fallthru;
1344 
1345   if (dir_p)
1346     dir = *dir_p;
1347   else
1348     dir = dir_both;
1349   afterlast_dir = dir;
1350   last_dir = afterlast_dir;
1351 
1352   /* Skip simple jumps at the end of the blocks.  Complex jumps still
1353      need to be compared for equivalence, which we'll do below.  */
1354 
1355   i1 = BB_END (bb1);
1356   last1 = afterlast1 = last2 = afterlast2 = NULL;
1357   if (onlyjump_p (i1)
1358       || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1359     {
1360       last1 = i1;
1361       i1 = PREV_INSN (i1);
1362     }
1363 
1364   i2 = BB_END (bb2);
1365   if (onlyjump_p (i2)
1366       || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1367     {
1368       last2 = i2;
1369       /* Count everything except for the unconditional jump as an insn.
1370 	 Don't count any jumps if dir_p is NULL.  */
1371       if (!simplejump_p (i2) && !returnjump_p (i2) && last1 && dir_p)
1372 	ninsns++;
1373       i2 = PREV_INSN (i2);
1374     }
1375 
1376   while (true)
1377     {
1378       /* In the following example, we can replace all jumps to C by jumps to A.
1379 
1380          This removes 4 duplicate insns.
1381          [bb A] insn1            [bb C] insn1
1382                 insn2                   insn2
1383          [bb B] insn3                   insn3
1384                 insn4                   insn4
1385                 jump_insn               jump_insn
1386 
1387          We could also replace all jumps to A by jumps to C, but that leaves B
1388          alive, and removes only 2 duplicate insns.  In a subsequent crossjump
1389          step, all jumps to B would be replaced with jumps to the middle of C,
1390          achieving the same result with more effort.
1391          So we allow only the first possibility, which means that we don't allow
1392          fallthru in the block that's being replaced.  */
1393 
1394       follow_fallthru = dir_p && dir != dir_forward;
1395       walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
1396       if (did_fallthru)
1397         dir = dir_backward;
1398 
1399       follow_fallthru = dir_p && dir != dir_backward;
1400       walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
1401       if (did_fallthru)
1402         dir = dir_forward;
1403 
1404       if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1405 	break;
1406 
1407       /* Do not turn a crossing edge into a non-crossing one or vice versa
1408 	 after reload.  */
1409       if (BB_PARTITION (BLOCK_FOR_INSN (i1))
1410 	  != BB_PARTITION (BLOCK_FOR_INSN (i2))
1411 	  && reload_completed)
1412 	break;
1413 
1414       dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
1415       if (dir == dir_none || (!dir_p && dir != dir_both))
1416 	break;
1417 
1418       merge_memattrs (i1, i2);
1419 
1420       /* Don't begin a cross-jump with a NOTE insn.  */
1421       if (INSN_P (i1))
1422 	{
1423 	  merge_notes (i1, i2);
1424 
1425 	  afterlast1 = last1, afterlast2 = last2;
1426 	  last1 = i1, last2 = i2;
1427 	  afterlast_dir = last_dir;
1428 	  last_dir = dir;
1429 	  if (active_insn_p (i1))
1430 	    ninsns++;
1431 	}
1432 
1433       i1 = PREV_INSN (i1);
1434       i2 = PREV_INSN (i2);
1435     }
1436 
1437   /* Don't allow the insn after a compare to be shared by
1438      cross-jumping unless the compare is also shared.  */
1439   if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
1440       && ! sets_cc0_p (last1))
1441     last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;
1442 
1443   /* Include preceding notes and labels in the cross-jump.  One,
1444      this may bring us to the head of the blocks as requested above.
1445      Two, it keeps line number notes as matched as possible.  */
1446   if (ninsns)
1447     {
1448       bb1 = BLOCK_FOR_INSN (last1);
1449       while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
1450 	last1 = PREV_INSN (last1);
1451 
1452       if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1453 	last1 = PREV_INSN (last1);
1454 
1455       bb2 = BLOCK_FOR_INSN (last2);
1456       while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
1457 	last2 = PREV_INSN (last2);
1458 
1459       if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1460 	last2 = PREV_INSN (last2);
1461 
1462       *f1 = last1;
1463       *f2 = last2;
1464     }
1465 
1466   if (dir_p)
1467     *dir_p = last_dir;
1468   return ninsns;
1469 }
1470 
1471 /* Like flow_find_cross_jump, except start looking for a matching sequence from
1472    the head of the two blocks.  Do not include jumps at the end.
1473    If STOP_AFTER is nonzero, stop after finding that many matching
1474    instructions.  If STOP_AFTER is zero, count all INSN_P insns; if it is
1475    nonzero, only count active insns.  */
1476 
1477 int
1478 flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx_insn **f1,
1479 				  rtx_insn **f2, int stop_after)
1480 {
1481   rtx_insn *i1, *i2, *last1, *last2, *beforelast1, *beforelast2;
1482   int ninsns = 0;
1483   edge e;
1484   edge_iterator ei;
1485   int nehedges1 = 0, nehedges2 = 0;
1486 
1487   FOR_EACH_EDGE (e, ei, bb1->succs)
1488     if (e->flags & EDGE_EH)
1489       nehedges1++;
1490   FOR_EACH_EDGE (e, ei, bb2->succs)
1491     if (e->flags & EDGE_EH)
1492       nehedges2++;
1493 
1494   i1 = BB_HEAD (bb1);
1495   i2 = BB_HEAD (bb2);
1496   last1 = beforelast1 = last2 = beforelast2 = NULL;
1497 
1498   while (true)
1499     {
1500       /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG.  */
1501       while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
1502 	{
1503 	  if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
1504 	    break;
1505 	  i1 = NEXT_INSN (i1);
1506 	}
1507 
1508       while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
1509 	{
1510 	  if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
1511 	    break;
1512 	  i2 = NEXT_INSN (i2);
1513 	}
1514 
1515       if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
1516 	  || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
1517 	break;
1518 
1519       if (NOTE_P (i1) || NOTE_P (i2)
1520 	  || JUMP_P (i1) || JUMP_P (i2))
1521 	break;
1522 
1523       /* A sanity check to make sure we're not merging insns with different
1524 	 effects on EH.  If only one of them ends a basic block, it shouldn't
1525 	 have an EH edge; if both end a basic block, there should be the same
1526 	 number of EH edges.  */
1527       if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
1528 	   && nehedges1 > 0)
1529 	  || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
1530 	      && nehedges2 > 0)
1531 	  || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
1532 	      && nehedges1 != nehedges2))
1533 	break;
1534 
1535       if (old_insns_match_p (0, i1, i2) != dir_both)
1536 	break;
1537 
1538       merge_memattrs (i1, i2);
1539 
1540       /* Don't begin a cross-jump with a NOTE insn.  */
1541       if (INSN_P (i1))
1542 	{
1543 	  merge_notes (i1, i2);
1544 
1545 	  beforelast1 = last1, beforelast2 = last2;
1546 	  last1 = i1, last2 = i2;
1547 	  if (!stop_after || active_insn_p (i1))
1548 	    ninsns++;
1549 	}
1550 
1551       if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
1552 	  || (stop_after > 0 && ninsns == stop_after))
1553 	break;
1554 
1555       i1 = NEXT_INSN (i1);
1556       i2 = NEXT_INSN (i2);
1557     }
1558 
1559   /* Don't allow a compare to be shared by cross-jumping unless the insn
1560      after the compare is also shared.  */
1561   if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
1562       && sets_cc0_p (last1))
1563     last1 = beforelast1, last2 = beforelast2, ninsns--;
1564 
1565   if (ninsns)
1566     {
1567       *f1 = last1;
1568       *f2 = last2;
1569     }
1570 
1571   return ninsns;
1572 }
1573 
1574 /* Return true iff outgoing edges of BB1 and BB2 match, together with
1575    the branch instruction.  This means that if we commonize the control
1576    flow before the end of the basic block, the semantics remain unchanged.
1577 
1578    We may assume that there exists one edge with a common destination.  */
1579 
1580 static bool
1581 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1582 {
1583   int nehedges1 = 0, nehedges2 = 0;
1584   edge fallthru1 = 0, fallthru2 = 0;
1585   edge e1, e2;
1586   edge_iterator ei;
1587 
1588   /* If we performed shrink-wrapping, edges to the exit block can
1589      only be distinguished for JUMP_INSNs.  The two paths may differ in
1590      whether they went through the prologue.  Sibcalls are fine, we know
1591      that we either didn't need or inserted an epilogue before them.  */
1592   if (crtl->shrink_wrapped
1593       && single_succ_p (bb1)
1594       && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun)
1595       && !JUMP_P (BB_END (bb1))
1596       && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
1597     return false;
1598 
1599   /* If BB1 has only one successor, we may be looking at either an
1600      unconditional jump, or a fake edge to exit.  */
1601   if (single_succ_p (bb1)
1602       && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1603       && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1604     return (single_succ_p (bb2)
1605 	    && (single_succ_edge (bb2)->flags
1606 		& (EDGE_COMPLEX | EDGE_FAKE)) == 0
1607 	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1608 
1609   /* Match conditional jumps - this may get tricky when fallthru and branch
1610      edges are crossed.  */
1611   if (EDGE_COUNT (bb1->succs) == 2
1612       && any_condjump_p (BB_END (bb1))
1613       && onlyjump_p (BB_END (bb1)))
1614     {
1615       edge b1, f1, b2, f2;
1616       bool reverse, match;
1617       rtx set1, set2, cond1, cond2;
1618       enum rtx_code code1, code2;
1619 
1620       if (EDGE_COUNT (bb2->succs) != 2
1621 	  || !any_condjump_p (BB_END (bb2))
1622 	  || !onlyjump_p (BB_END (bb2)))
1623 	return false;
1624 
1625       b1 = BRANCH_EDGE (bb1);
1626       b2 = BRANCH_EDGE (bb2);
1627       f1 = FALLTHRU_EDGE (bb1);
1628       f2 = FALLTHRU_EDGE (bb2);
1629 
1630       /* Get around possible forwarders on fallthru edges.  Other cases
1631 	 should be optimized out already.  */
1632       if (FORWARDER_BLOCK_P (f1->dest))
1633 	f1 = single_succ_edge (f1->dest);
1634 
1635       if (FORWARDER_BLOCK_P (f2->dest))
1636 	f2 = single_succ_edge (f2->dest);
1637 
1638       /* To simplify use of this function, return false if there are
1639 	 unneeded forwarder blocks.  These will get eliminated later
1640 	 during cleanup_cfg.  */
1641       if (FORWARDER_BLOCK_P (f1->dest)
1642 	  || FORWARDER_BLOCK_P (f2->dest)
1643 	  || FORWARDER_BLOCK_P (b1->dest)
1644 	  || FORWARDER_BLOCK_P (b2->dest))
1645 	return false;
1646 
1647       if (f1->dest == f2->dest && b1->dest == b2->dest)
1648 	reverse = false;
1649       else if (f1->dest == b2->dest && b1->dest == f2->dest)
1650 	reverse = true;
1651       else
1652 	return false;
1653 
1654       set1 = pc_set (BB_END (bb1));
1655       set2 = pc_set (BB_END (bb2));
1656       if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1657 	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1658 	reverse = !reverse;
1659 
1660       cond1 = XEXP (SET_SRC (set1), 0);
1661       cond2 = XEXP (SET_SRC (set2), 0);
1662       code1 = GET_CODE (cond1);
1663       if (reverse)
1664 	code2 = reversed_comparison_code (cond2, BB_END (bb2));
1665       else
1666 	code2 = GET_CODE (cond2);
1667 
1668       if (code2 == UNKNOWN)
1669 	return false;
1670 
1671       /* Verify codes and operands match.  */
1672       match = ((code1 == code2
1673 		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1674 		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1675 	       || (code1 == swap_condition (code2)
1676 		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
1677 					      XEXP (cond2, 0))
1678 		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
1679 					      XEXP (cond2, 1))));
1680 
1681       /* If we return true, we will join the blocks, which means that
1682 	 we will only have one branch prediction bit to work with.  Thus
1683 	 we require the existing branches to have probabilities that are
1684 	 roughly similar.  */
1685       if (match
1686 	  && optimize_bb_for_speed_p (bb1)
1687 	  && optimize_bb_for_speed_p (bb2))
1688 	{
1689 	  profile_probability prob2;
1690 
1691 	  if (b1->dest == b2->dest)
1692 	    prob2 = b2->probability;
1693 	  else
1694 	    /* Do not use f2 probability as f2 may be forwarded.  */
1695 	    prob2 = b2->probability.invert ();
1696 
1697 	  /* Fail if the difference in probabilities is greater than 50%.
1698 	     This rules out two well-predicted branches with opposite
1699 	     outcomes.  */
1700 	  if (b1->probability.differs_lot_from_p (prob2))
1701 	    {
1702 	      if (dump_file)
1703 		{
1704 		  fprintf (dump_file,
1705 			   "Outcomes of branch in bb %i and %i differ too"
1706 			   " much (", bb1->index, bb2->index);
1707 		  b1->probability.dump (dump_file);
1708 		  prob2.dump (dump_file);
1709 		  fprintf (dump_file, ")\n");
1710 		}
1711 	      return false;
1712 	    }
1713 	}
1714 
1715       if (dump_file && match)
1716 	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1717 		 bb1->index, bb2->index);
1718 
1719       return match;
1720     }
1721 
1722   /* Generic case - we are seeing a computed jump, table jump or trapping
1723      instruction.  */
1724 
1725   /* Check whether there are tablejumps in the end of BB1 and BB2.
1726      Return true if they are identical.  */
1727     {
1728       rtx_insn *label1, *label2;
1729       rtx_jump_table_data *table1, *table2;
1730 
1731       if (tablejump_p (BB_END (bb1), &label1, &table1)
1732 	  && tablejump_p (BB_END (bb2), &label2, &table2)
1733 	  && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1734 	{
1735 	  /* The labels should never be the same rtx.  If they really are the same,
1736 	     the jump tables are the same too.  So disable crossjumping of blocks BB1
1737 	     and BB2 because when deleting the common insns in the end of BB1
1738 	     by delete_basic_block () the jump table would be deleted too.  */
1739 	  /* If LABEL2 is referenced in BB1->END do not do anything
1740 	     because we would lose information when replacing
1741 	     LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
1742 	  if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1743 	    {
1744 	      /* Set IDENTICAL to true when the tables are identical.  */
1745 	      bool identical = false;
1746 	      rtx p1, p2;
1747 
1748 	      p1 = PATTERN (table1);
1749 	      p2 = PATTERN (table2);
1750 	      if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1751 		{
1752 		  identical = true;
1753 		}
1754 	      else if (GET_CODE (p1) == ADDR_DIFF_VEC
1755 		       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1756 		       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1757 		       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1758 		{
1759 		  int i;
1760 
1761 		  identical = true;
1762 		  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1763 		    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1764 		      identical = false;
1765 		}
1766 
1767 	      if (identical)
1768 		{
1769 		  bool match;
1770 
1771 		  /* Temporarily replace references to LABEL1 with LABEL2
1772 		     in BB1->END so that we could compare the instructions.  */
1773 		  replace_label_in_insn (BB_END (bb1), label1, label2, false);
1774 
1775 		  match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
1776 			   == dir_both);
1777 		  if (dump_file && match)
1778 		    fprintf (dump_file,
1779 			     "Tablejumps in bb %i and %i match.\n",
1780 			     bb1->index, bb2->index);
1781 
1782 		  /* Set the original label in BB1->END because when deleting
1783 		     a block whose end is a tablejump, the tablejump referenced
1784 		     from the instruction is deleted too.  */
1785 		  replace_label_in_insn (BB_END (bb1), label2, label1, false);
1786 
1787 		  return match;
1788 		}
1789 	    }
1790 	  return false;
1791 	}
1792     }
1793 
1794   /* Find the last non-debug non-note instruction in each bb, but
1795      stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p
1796      handles that case specially.  old_insns_match_p does not handle
1797      other types of instruction notes.  */
1798   rtx_insn *last1 = BB_END (bb1);
1799   rtx_insn *last2 = BB_END (bb2);
1800   while (!NOTE_INSN_BASIC_BLOCK_P (last1)
1801 	 && (DEBUG_INSN_P (last1) || NOTE_P (last1)))
1802     last1 = PREV_INSN (last1);
1803   while (!NOTE_INSN_BASIC_BLOCK_P (last2)
1804 	 && (DEBUG_INSN_P (last2) || NOTE_P (last2)))
1805     last2 = PREV_INSN (last2);
1806   gcc_assert (last1 && last2);
1807 
1808   /* First ensure that the instructions match.  There may be many outgoing
1809      edges so this test is generally cheaper.  */
1810   if (old_insns_match_p (mode, last1, last2) != dir_both)
1811     return false;
1812 
1813   /* Search the outgoing edges, ensure that the counts do match, find possible
1814      fallthru and exception handling edges since these need more
1815      validation.  */
1816   if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1817     return false;
1818 
1819   bool nonfakeedges = false;
1820   FOR_EACH_EDGE (e1, ei, bb1->succs)
1821     {
1822       e2 = EDGE_SUCC (bb2, ei.index);
1823 
1824       if ((e1->flags & EDGE_FAKE) == 0)
1825 	nonfakeedges = true;
1826 
1827       if (e1->flags & EDGE_EH)
1828 	nehedges1++;
1829 
1830       if (e2->flags & EDGE_EH)
1831 	nehedges2++;
1832 
1833       if (e1->flags & EDGE_FALLTHRU)
1834 	fallthru1 = e1;
1835       if (e2->flags & EDGE_FALLTHRU)
1836 	fallthru2 = e2;
1837     }
1838 
1839   /* If the number of edges of various types does not match, fail.  */
1840   if (nehedges1 != nehedges2
1841       || (fallthru1 != 0) != (fallthru2 != 0))
1842     return false;
1843 
1844   /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors
1845      and the last real insn doesn't have REG_ARGS_SIZE note, don't
1846      attempt to optimize, as the two basic blocks might have different
1847      REG_ARGS_SIZE depths.  For noreturn calls and unconditional
1848      traps there should be REG_ARGS_SIZE notes, but they could be missing
1849      for __builtin_unreachable () uses though.  */
1850   if (!nonfakeedges
1851       && !ACCUMULATE_OUTGOING_ARGS
1852       && (!INSN_P (last1)
1853           || !find_reg_note (last1, REG_ARGS_SIZE, NULL)))
1854     return false;
1855 
1856   /* Fallthru edges must be forwarded to the same destination.  */
1857   if (fallthru1)
1858     {
1859       basic_block d1 = (forwarder_block_p (fallthru1->dest)
1860 			? single_succ (fallthru1->dest): fallthru1->dest);
1861       basic_block d2 = (forwarder_block_p (fallthru2->dest)
1862 			? single_succ (fallthru2->dest): fallthru2->dest);
1863 
1864       if (d1 != d2)
1865 	return false;
1866     }
1867 
1868   /* Ensure the same EH region.  */
1869   {
1870     rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1871     rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1872 
1873     if (!n1 && n2)
1874       return false;
1875 
1876     if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1877       return false;
1878   }
1879 
1880   /* The same checks as in try_crossjump_to_edge.  They are required for the
1881      RTL version of sequence abstraction.  */
1882   FOR_EACH_EDGE (e1, ei, bb2->succs)
1883     {
1884       edge e2;
1885       edge_iterator ei;
1886       basic_block d1 = e1->dest;
1887 
1888       if (FORWARDER_BLOCK_P (d1))
1889         d1 = EDGE_SUCC (d1, 0)->dest;
1890 
1891       FOR_EACH_EDGE (e2, ei, bb1->succs)
1892         {
1893           basic_block d2 = e2->dest;
1894           if (FORWARDER_BLOCK_P (d2))
1895             d2 = EDGE_SUCC (d2, 0)->dest;
1896           if (d1 == d2)
1897             break;
1898         }
1899 
1900       if (!e2)
1901         return false;
1902     }
1903 
1904   return true;
1905 }
1906 
1907 /* Returns true if basic block BB has a preserve label.  */
1908 
1909 static bool
1910 block_has_preserve_label (basic_block bb)
1911 {
1912   return (bb
1913           && block_label (bb)
1914           && LABEL_PRESERVE_P (block_label (bb)));
1915 }
1916 
1917 /* E1 and E2 are edges with the same destination block.  Search their
1918    predecessors for common code.  If found, redirect control flow from
1919    (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
1920    or the other way around (dir_backward).  DIR specifies the allowed
1921    replacement direction.  */
1922 
1923 static bool
1924 try_crossjump_to_edge (int mode, edge e1, edge e2,
1925                        enum replace_direction dir)
1926 {
1927   int nmatch;
1928   basic_block src1 = e1->src, src2 = e2->src;
1929   basic_block redirect_to, redirect_from, to_remove;
1930   basic_block osrc1, osrc2, redirect_edges_to, tmp;
1931   rtx_insn *newpos1, *newpos2;
1932   edge s;
1933   edge_iterator ei;
1934 
1935   newpos1 = newpos2 = NULL;
1936 
1937   /* Search backward through forwarder blocks.  We don't need to worry
1938      about multiple entry or chained forwarders, as they will be optimized
1939      away.  We do this to look past the unconditional jump following a
1940      conditional jump that is required due to the current CFG shape.  */
1941   if (single_pred_p (src1)
1942       && FORWARDER_BLOCK_P (src1))
1943     e1 = single_pred_edge (src1), src1 = e1->src;
1944 
1945   if (single_pred_p (src2)
1946       && FORWARDER_BLOCK_P (src2))
1947     e2 = single_pred_edge (src2), src2 = e2->src;
1948 
1949   /* Nothing to do if we reach ENTRY, or a common source block.  */
1950   if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1951       || src2 == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1952     return false;
1953   if (src1 == src2)
1954     return false;
1955 
1956   /* Seeing more than one forwarder block would confuse us later...  */
1957   if (FORWARDER_BLOCK_P (e1->dest)
1958       && FORWARDER_BLOCK_P (single_succ (e1->dest)))
1959     return false;
1960 
1961   if (FORWARDER_BLOCK_P (e2->dest)
1962       && FORWARDER_BLOCK_P (single_succ (e2->dest)))
1963     return false;
1964 
1965   /* Likewise with dead code (possibly newly created by the other optimizations
1966      of cleanup_cfg).  */
1967   if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1968     return false;
1969 
1970   /* Do not turn crossing edge to non-crossing or vice versa after reload.  */
1971   if (BB_PARTITION (src1) != BB_PARTITION (src2)
1972       && reload_completed)
1973     return false;
1974 
1975   /* Look for the common insn sequence, part the first ...  */
1976   if (!outgoing_edges_match (mode, src1, src2))
1977     return false;
1978 
1979   /* ... and part the second.  */
1980   nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);
1981 
1982   osrc1 = src1;
1983   osrc2 = src2;
1984   if (newpos1 != NULL_RTX)
1985     src1 = BLOCK_FOR_INSN (newpos1);
1986   if (newpos2 != NULL_RTX)
1987     src2 = BLOCK_FOR_INSN (newpos2);
1988 
1989   /* Check that SRC1 and SRC2 have preds again.  They may have changed
1990      above due to the call to flow_find_cross_jump.  */
1991   if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1992     return false;
1993 
1994   if (dir == dir_backward)
1995     {
1996       std::swap (osrc1, osrc2);
1997       std::swap (src1, src2);
1998       std::swap (e1, e2);
1999       std::swap (newpos1, newpos2);
2000     }
2001 
2002   /* Don't proceed with the crossjump unless we found a sufficient number
2003      of matching instructions or the 'from' block was totally matched
2004      (such that its predecessors will hopefully be redirected and the
2005      block removed).  */
2006   if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
2007       && (newpos1 != BB_HEAD (src1)))
2008     return false;
2009 
2010   /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
2011   if (block_has_preserve_label (e1->dest)
2012       && (e1->flags & EDGE_ABNORMAL))
2013     return false;
2014 
2015   /* Here we know that the insns in the end of SRC1 which are common with SRC2
2016      will be deleted.
2017      If we have tablejumps in the end of SRC1 and SRC2
2018      they have already been compared for equivalence in outgoing_edges_match (),
2019      so replace the references to TABLE1 by references to TABLE2.  */
2020   {
2021       rtx_insn *label1, *label2;
2022       rtx_jump_table_data *table1, *table2;
2023 
2024       if (tablejump_p (BB_END (osrc1), &label1, &table1)
2025 	  && tablejump_p (BB_END (osrc2), &label2, &table2)
2026 	  && label1 != label2)
2027 	{
2028 	  rtx_insn *insn;
2029 
2030 	  /* Replace references to LABEL1 with LABEL2.  */
2031 	  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2032 	    {
2033 	      /* Do not replace the label in SRC1->END because when deleting
2034 		 a block whose end is a tablejump, the tablejump referenced
2035 		 from the instruction is deleted too.  */
2036 	      if (insn != BB_END (osrc1))
2037 		replace_label_in_insn (insn, label1, label2, true);
2038 	    }
2039 	}
2040   }
2041 
2042   /* Avoid splitting if possible.  We must always split when SRC2 has
2043      EH predecessor edges, or we may end up with basic blocks with both
2044      normal and EH predecessor edges.  */
2045   if (newpos2 == BB_HEAD (src2)
2046       && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
2047     redirect_to = src2;
2048   else
2049     {
2050       if (newpos2 == BB_HEAD (src2))
2051 	{
2052 	  /* Skip possible basic block header.  */
2053 	  if (LABEL_P (newpos2))
2054 	    newpos2 = NEXT_INSN (newpos2);
2055 	  while (DEBUG_INSN_P (newpos2))
2056 	    newpos2 = NEXT_INSN (newpos2);
2057 	  if (NOTE_P (newpos2))
2058 	    newpos2 = NEXT_INSN (newpos2);
2059 	  while (DEBUG_INSN_P (newpos2))
2060 	    newpos2 = NEXT_INSN (newpos2);
2061 	}
2062 
2063       if (dump_file)
2064 	fprintf (dump_file, "Splitting bb %i before %i insns\n",
2065 		 src2->index, nmatch);
2066       redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
2067     }
2068 
2069   if (dump_file)
2070     fprintf (dump_file,
2071 	     "Cross jumping from bb %i to bb %i; %i common insns\n",
2072 	     src1->index, src2->index, nmatch);
2073 
2074   /* We may have some registers visible through the block.  */
2075   df_set_bb_dirty (redirect_to);
2076 
2077   if (osrc2 == src2)
2078     redirect_edges_to = redirect_to;
2079   else
2080     redirect_edges_to = osrc2;
2081 
2082   /* Recompute the counts of destinations of outgoing edges.  */
2083   FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
2084     {
2085       edge s2;
2086       edge_iterator ei;
2087       basic_block d = s->dest;
2088 
2089       if (FORWARDER_BLOCK_P (d))
2090 	d = single_succ (d);
2091 
2092       FOR_EACH_EDGE (s2, ei, src1->succs)
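           /* Find the edge S2 out of SRC1 that reaches the same (possibly
	 forwarded) destination as S; outgoing_edges_match has already
	 verified that the successor destinations correspond.  */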
2093 	{
2094 	  basic_block d2 = s2->dest;
2095 	  if (FORWARDER_BLOCK_P (d2))
2096 	    d2 = single_succ (d2);
2097 	  if (d == d2)
2098 	    break;
2099 	}
2100 
2101       /* Take care to update possible forwarder blocks.  We verified
2102 	 that there is no more than one in the chain, so we can't run
2103 	 into an infinite loop.  */
2104       if (FORWARDER_BLOCK_P (s->dest))
2105 	s->dest->count += s->count ();
2106 
2107       if (FORWARDER_BLOCK_P (s2->dest))
2108 	s2->dest->count -= s->count ();
2109 
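           /* Combine the branch probabilities of the two matching edges,
	 weighting each by the execution count of its source block.  */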
2110       s->probability = s->probability.combine_with_count
2111 			  (redirect_edges_to->count,
2112 			   s2->probability, src1->count);
2113     }
2114 
2115   /* Adjust count for the block.  An earlier jump
2116      threading pass may have left the profile in an inconsistent
2117      state (see update_bb_profile_for_threading) so we must be
2118      prepared for overflows.  */
2119   tmp = redirect_to;
2120   do
2121     {
2122       tmp->count += src1->count;
2123       if (tmp == redirect_edges_to)
2124         break;
2125       tmp = find_fallthru_edge (tmp->succs)->dest;
2126     }
2127   while (true);
2128   update_br_prob_note (redirect_edges_to);
2129 
2130   /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */
2131 
2132   /* Skip possible basic block header.  */
2133   if (LABEL_P (newpos1))
2134     newpos1 = NEXT_INSN (newpos1);
2135 
2136   while (DEBUG_INSN_P (newpos1))
2137     newpos1 = NEXT_INSN (newpos1);
2138 
2139   if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
2140     newpos1 = NEXT_INSN (newpos1);
2141 
2142   while (DEBUG_INSN_P (newpos1))
2143     newpos1 = NEXT_INSN (newpos1);
2144 
2145   redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
2146   to_remove = single_succ (redirect_from);
2147 
2148   redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
2149   delete_basic_block (to_remove);
2150 
2151   update_forwarder_flag (redirect_from);
2152   if (redirect_to != src2)
2153     update_forwarder_flag (src2);
2154 
2155   return true;
2156 }
2157 
2158 /* Search the predecessors of BB for common insn sequences.  When found,
2159    share code between them by redirecting control flow.  Return true if
2160    any changes were made.  */
2161 
2162 static bool
2163 try_crossjump_bb (int mode, basic_block bb)
2164 {
2165   edge e, e2, fallthru;
2166   bool changed;
2167   unsigned max, ix, ix2;
2168 
2169   /* Nothing to do if there are not at least two incoming edges.  */
2170   if (EDGE_COUNT (bb->preds) < 2)
2171     return false;
2172 
2173   /* Don't crossjump if this block ends in a computed jump,
2174      unless we are optimizing for size.  */
2175   if (optimize_bb_for_size_p (bb)
2176       && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
2177       && computed_jump_p (BB_END (bb)))
2178     return false;
2179 
2180   /* If we are partitioning hot/cold basic blocks, we don't want to
2181      mess up unconditional or indirect jumps that cross between hot
2182      and cold sections.
2183 
2184      Basic block partitioning may result in some jumps that appear to
2185      be optimizable (or blocks that appear to be mergeable), but which really
2186      must be left untouched (they are required to make it safely across
2187      partition boundaries).  See the comments at the top of
2188      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
2189 
2190   if (BB_PARTITION (EDGE_PRED (bb, 0)->src)
2191       != BB_PARTITION (EDGE_PRED (bb, 1)->src)
2192       || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
2193     return false;
2194 
2195   /* It is always cheapest to redirect a block that ends in a branch to
2196      a block that falls through into BB, as that adds no branches to the
2197      program.  We'll try that combination first.  */
2198   fallthru = NULL;
2199   max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
2200 
2201   if (EDGE_COUNT (bb->preds) > max)
2202     return false;
2203 
2204   fallthru = find_fallthru_edge (bb->preds);
2205 
2206   changed = false;
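       /* Walk the predecessor edges of BB.  Whenever a crossjump succeeds, the
     edge vector may have changed, so restart the scan from the first
     predecessor.  */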
2207   for (ix = 0; ix < EDGE_COUNT (bb->preds);)
2208     {
2209       e = EDGE_PRED (bb, ix);
2210       ix++;
2211 
2212       /* As noted above, first try with the fallthru predecessor (or a
2213 	 fallthru predecessor, if we are in cfglayout mode).  */
2214       if (fallthru)
2215 	{
2216 	  /* Don't combine the fallthru edge into anything else.
2217 	     If there is a match, we'll do it the other way around.  */
2218 	  if (e == fallthru)
2219 	    continue;
2220 	  /* If nothing changed since the last attempt, there is nothing
2221 	     we can do.  */
2222 	  if (!first_pass
2223 	      && !((e->src->flags & BB_MODIFIED)
2224 		   || (fallthru->src->flags & BB_MODIFIED)))
2225 	    continue;
2226 
2227 	  if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))
2228 	    {
2229 	      changed = true;
2230 	      ix = 0;
2231 	      continue;
2232 	    }
2233 	}
2234 
2235       /* Non-obvious work limiting check: Recognize that we're going
2236 	 to call try_crossjump_bb on every basic block.  So if we have
2237 	 two blocks with lots of outgoing edges (a switch) and they
2238 	 share lots of common destinations, then we would do the
2239 	 cross-jump check once for each common destination.
2240 
2241 	 Now, if the blocks actually are cross-jump candidates, then
2242 	 all of their destinations will be shared.  Which means that
2243 	 we only need check them for cross-jump candidacy once.  We
2244 	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
2245 	 choosing to do the check from the block for which the edge
2246 	 in question is the first successor of A.  */
2247       if (EDGE_SUCC (e->src, 0) != e)
2248 	continue;
2249 
2250       for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
2251 	{
2252 	  e2 = EDGE_PRED (bb, ix2);
2253 
2254 	  if (e2 == e)
2255 	    continue;
2256 
2257 	  /* We've already checked the fallthru edge above.  */
2258 	  if (e2 == fallthru)
2259 	    continue;
2260 
2261 	  /* The "first successor" check above only prevents multiple
2262 	     checks of crossjump(A,B).  In order to prevent redundant
2263 	     checks of crossjump(B,A), require that A be the block
2264 	     with the lowest index.  */
2265 	  if (e->src->index > e2->src->index)
2266 	    continue;
2267 
2268 	  /* If nothing changed since the last attempt, there is nothing
2269 	     we can do.  */
2270 	  if (!first_pass
2271 	      && !((e->src->flags & BB_MODIFIED)
2272 		   || (e2->src->flags & BB_MODIFIED)))
2273 	    continue;
2274 
2275 	  /* Neither e nor e2 is a fallthru edge, so we can crossjump in either
2276 	     direction.  */
2277 	  if (try_crossjump_to_edge (mode, e, e2, dir_both))
2278 	    {
2279 	      changed = true;
2280 	      ix = 0;
2281 	      break;
2282 	    }
2283 	}
2284     }
2285 
2286   if (changed)
2287     crossjumps_occurred = true;
2288 
2289   return changed;
2290 }
2291 
2292 /* Search the successors of BB for common insn sequences.  When found,
2293    share code between them by moving it across the basic block
2294    boundary.  Return true if any changes were made.  */
2295 
2296 static bool
2297 try_head_merge_bb (basic_block bb)
2298 {
2299   basic_block final_dest_bb = NULL;
2300   int max_match = INT_MAX;
2301   edge e0;
2302   rtx_insn **headptr, **currptr, **nextptr;
2303   bool changed, moveall;
2304   unsigned ix;
2305   rtx_insn *e0_last_head;
2306   rtx cond;
2307   rtx_insn *move_before;
2308   unsigned nedges = EDGE_COUNT (bb->succs);
2309   rtx_insn *jump = BB_END (bb);
2310   regset live, live_union;
2311 
2312   /* Nothing to do if there are not at least two outgoing edges.  */
2313   if (nedges < 2)
2314     return false;
2315 
2316   /* Don't crossjump if this block ends in a computed jump,
2317      unless we are optimizing for size.  */
2318   if (optimize_bb_for_size_p (bb)
2319       && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
2320       && computed_jump_p (BB_END (bb)))
2321     return false;
2322 
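       /* Matched insns will be moved to just before MOVE_BEFORE: the earliest
     insn used to compute the branch condition, or, if no condition can be
     extracted, the jump itself (or the insn before it when the jump reads
     cc0).  */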
2323   cond = get_condition (jump, &move_before, true, false);
2324   if (cond == NULL_RTX)
2325     {
2326       if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2327 	move_before = prev_nonnote_nondebug_insn (jump);
2328       else
2329 	move_before = jump;
2330     }
2331 
2332   for (ix = 0; ix < nedges; ix++)
2333     if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
2334       return false;
2335 
2336   for (ix = 0; ix < nedges; ix++)
2337     {
2338       edge e = EDGE_SUCC (bb, ix);
2339       basic_block other_bb = e->dest;
2340 
2341       if (df_get_bb_dirty (other_bb))
2342 	{
2343 	  block_was_dirty = true;
2344 	  return false;
2345 	}
2346 
2347       if (e->flags & EDGE_ABNORMAL)
2348 	return false;
2349 
2350       /* Normally, all destination blocks must only be reachable from this
2351 	 block, i.e. they must have one incoming edge.
2352 
2353 	 There is one special case we can handle, that of multiple consecutive
2354 	 jumps where the first jumps to one of the targets of the second jump.
2355 	 This happens frequently in switch statements for default labels.
2356 	 The structure is as follows:
2357 	 FINAL_DEST_BB
2358 	 ....
2359 	 if (cond) jump A;
2360 	 fall through
2361 	 BB
2362 	 jump with targets A, B, C, D...
2363 	 A
2364 	 has two incoming edges, from FINAL_DEST_BB and BB
2365 
2366 	 In this case, we can try to move the insns through BB and into
2367 	 FINAL_DEST_BB.  */
2368       if (EDGE_COUNT (other_bb->preds) != 1)
2369 	{
2370 	  edge incoming_edge, incoming_bb_other_edge;
2371 	  edge_iterator ei;
2372 
2373 	  if (final_dest_bb != NULL
2374 	      || EDGE_COUNT (other_bb->preds) != 2)
2375 	    return false;
2376 
2377 	  /* We must be able to move the insns across the whole block.  */
2378 	  move_before = BB_HEAD (bb);
2379 	  while (!NONDEBUG_INSN_P (move_before))
2380 	    move_before = NEXT_INSN (move_before);
2381 
2382 	  if (EDGE_COUNT (bb->preds) != 1)
2383 	    return false;
2384 	  incoming_edge = EDGE_PRED (bb, 0);
2385 	  final_dest_bb = incoming_edge->src;
2386 	  if (EDGE_COUNT (final_dest_bb->succs) != 2)
2387 	    return false;
2388 	  FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
2389 	    if (incoming_bb_other_edge != incoming_edge)
2390 	      break;
2391 	  if (incoming_bb_other_edge->dest != other_bb)
2392 	    return false;
2393 	}
2394     }
2395 
2396   e0 = EDGE_SUCC (bb, 0);
2397   e0_last_head = NULL;
2398   changed = false;
2399 
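       /* Compute the longest sequence of insns common to the heads of all
     successor blocks.  MAX_MATCH is the shortest match found over all edges
     and E0_LAST_HEAD is the last insn of that sequence within E0->dest.  */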
2400   for (ix = 1; ix < nedges; ix++)
2401     {
2402       edge e = EDGE_SUCC (bb, ix);
2403       rtx_insn *e0_last, *e_last;
2404       int nmatch;
2405 
2406       nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
2407 						 &e0_last, &e_last, 0);
2408       if (nmatch == 0)
2409 	return false;
2410 
2411       if (nmatch < max_match)
2412 	{
2413 	  max_match = nmatch;
2414 	  e0_last_head = e0_last;
2415 	}
2416     }
2417 
2418   /* If we matched an entire block, we probably have to avoid moving the
2419      last insn.  */
2420   if (max_match > 0
2421       && e0_last_head == BB_END (e0->dest)
2422       && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
2423 	  || control_flow_insn_p (e0_last_head)))
2424     {
2425       max_match--;
2426       if (max_match == 0)
2427 	return false;
2428       e0_last_head = prev_real_nondebug_insn (e0_last_head);
2429     }
2430 
2431   if (max_match == 0)
2432     return false;
2433 
2434   /* We must find a union of the live registers at each of the end points.  */
2435   live = BITMAP_ALLOC (NULL);
2436   live_union = BITMAP_ALLOC (NULL);
2437 
2438   currptr = XNEWVEC (rtx_insn *, nedges);
2439   headptr = XNEWVEC (rtx_insn *, nedges);
2440   nextptr = XNEWVEC (rtx_insn *, nedges);
2441 
2442   for (ix = 0; ix < nedges; ix++)
2443     {
2444       int j;
2445       basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
2446       rtx_insn *head = BB_HEAD (merge_bb);
2447 
2448       while (!NONDEBUG_INSN_P (head))
2449 	head = NEXT_INSN (head);
2450       headptr[ix] = head;
2451       currptr[ix] = head;
2452 
2453       /* Compute the end point and live information.  */
2454       for (j = 1; j < max_match; j++)
2455 	do
2456 	  head = NEXT_INSN (head);
2457 	while (!NONDEBUG_INSN_P (head));
2458       simulate_backwards_to_point (merge_bb, live, head);
2459       IOR_REG_SET (live_union, live);
2460     }
2461 
2462   /* If we're moving across two blocks, verify the validity of the
2463      first move, then adjust the target and let the loop below deal
2464      with the final move.  */
2465   if (final_dest_bb != NULL)
2466     {
2467       rtx_insn *move_upto;
2468 
2469       moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
2470 				       jump, e0->dest, live_union,
2471 				       NULL, &move_upto);
2472       if (!moveall)
2473 	{
2474 	  if (move_upto == NULL_RTX)
2475 	    goto out;
2476 
2477 	  while (e0_last_head != move_upto)
2478 	    {
2479 	      df_simulate_one_insn_backwards (e0->dest, e0_last_head,
2480 					      live_union);
2481 	      e0_last_head = PREV_INSN (e0_last_head);
2482 	    }
2483 	}
2484       if (e0_last_head == NULL_RTX)
2485 	goto out;
2486 
2487       jump = BB_END (final_dest_bb);
2488       cond = get_condition (jump, &move_before, true, false);
2489       if (cond == NULL_RTX)
2490 	{
2491 	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2492 	    move_before = prev_nonnote_nondebug_insn (jump);
2493 	  else
2494 	    move_before = jump;
2495 	}
2496     }
2497 
2498   do
2499     {
2500       rtx_insn *move_upto;
2501       moveall = can_move_insns_across (currptr[0], e0_last_head,
2502 				       move_before, jump, e0->dest, live_union,
2503 				       NULL, &move_upto);
2504       if (!moveall && move_upto == NULL_RTX)
2505 	{
2506 	  if (jump == move_before)
2507 	    break;
2508 
2509 	  /* Try again, using a different insertion point.  */
2510 	  move_before = jump;
2511 
2512 	  /* Don't try moving before a cc0 user, as that may invalidate
2513 	     the cc0.  */
2514 	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2515 	    break;
2516 
2517 	  continue;
2518 	}
2519 
2520       if (final_dest_bb && !moveall)
2521 	/* We haven't checked whether a partial move would be OK for the first
2522 	   move, so we have to fail this case.  */
2523 	break;
2524 
2525       changed = true;
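           /* Advance CURRPTR for every edge in lockstep, one nondebug insn at a
	 time, until the pointer for the first edge reaches MOVE_UPTO.  */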
2526       for (;;)
2527 	{
2528 	  if (currptr[0] == move_upto)
2529 	    break;
2530 	  for (ix = 0; ix < nedges; ix++)
2531 	    {
2532 	      rtx_insn *curr = currptr[ix];
2533 	      do
2534 		curr = NEXT_INSN (curr);
2535 	      while (!NONDEBUG_INSN_P (curr));
2536 	      currptr[ix] = curr;
2537 	    }
2538 	}
2539 
2540       /* If we can't currently move all of the identical insns, remember
2541 	 each insn after the range that we'll merge.  */
2542       if (!moveall)
2543 	for (ix = 0; ix < nedges; ix++)
2544 	  {
2545 	    rtx_insn *curr = currptr[ix];
2546 	    do
2547 	      curr = NEXT_INSN (curr);
2548 	    while (!NONDEBUG_INSN_P (curr));
2549 	    nextptr[ix] = curr;
2550 	  }
2551 
2552       reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
2553       df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
2554       if (final_dest_bb != NULL)
2555 	df_set_bb_dirty (final_dest_bb);
2556       df_set_bb_dirty (bb);
2557       for (ix = 1; ix < nedges; ix++)
2558 	{
2559 	  df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
2560 	  delete_insn_chain (headptr[ix], currptr[ix], false);
2561 	}
2562       if (!moveall)
2563 	{
2564 	  if (jump == move_before)
2565 	    break;
2566 
2567 	  /* For the unmerged insns, try a different insertion point.  */
2568 	  move_before = jump;
2569 
2570 	  /* Don't try moving before a cc0 user, as that may invalidate
2571 	     the cc0.  */
2572 	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2573 	    break;
2574 
2575 	  for (ix = 0; ix < nedges; ix++)
2576 	    currptr[ix] = headptr[ix] = nextptr[ix];
2577 	}
2578     }
2579   while (!moveall);
2580 
2581  out:
2582   free (currptr);
2583   free (headptr);
2584   free (nextptr);
2585 
2586   crossjumps_occurred |= changed;
2587 
2588   return changed;
2589 }
2590 
2591 /* Return true if BB contains just a bb note, or a bb note followed
2592    by only DEBUG_INSNs.  */
2593 
2594 static bool
2595 trivially_empty_bb_p (basic_block bb)
2596 {
2597   rtx_insn *insn = BB_END (bb);
2598 
2599   while (1)
2600     {
2601       if (insn == BB_HEAD (bb))
2602 	return true;
2603       if (!DEBUG_INSN_P (insn))
2604 	return false;
2605       insn = PREV_INSN (insn);
2606     }
2607 }
2608 
2609 /* Return true if BB contains just a return and possibly a USE of the
2610    return value.  Fill in *RET and *USE with the return and use insns
2611    if any are found, otherwise NULL.  All CLOBBERs are ignored.  */
2612 
2613 static bool
2614 bb_is_just_return (basic_block bb, rtx_insn **ret, rtx_insn **use)
2615 {
2616   *ret = *use = NULL;
2617   rtx_insn *insn;
2618 
2619   if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
2620     return false;
2621 
2622   FOR_BB_INSNS (bb, insn)
2623     if (NONDEBUG_INSN_P (insn))
2624       {
2625 	rtx pat = PATTERN (insn);
2626 
2627 	if (!*ret && ANY_RETURN_P (pat))
2628 	  *ret = insn;
2629 	else if (!*ret && !*use && GET_CODE (pat) == USE
2630 	    && REG_P (XEXP (pat, 0))
2631 	    && REG_FUNCTION_VALUE_P (XEXP (pat, 0)))
2632 	  *use = insn;
2633 	else if (GET_CODE (pat) != CLOBBER)
2634 	  return false;
2635       }
2636 
2637   return !!*ret;
2638 }
2639 
2640 /* Do simple CFG optimizations - basic block merging, simplification of jump
2641    instructions, etc.  Return nonzero if changes were made.  */
2642 
2643 static bool
2644 try_optimize_cfg (int mode)
2645 {
2646   bool changed_overall = false;
2647   bool changed;
2648   int iterations = 0;
2649   basic_block bb, b, next;
2650 
2651   if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
2652     clear_bb_flags ();
2653 
2654   crossjumps_occurred = false;
2655 
2656   FOR_EACH_BB_FN (bb, cfun)
2657     update_forwarder_flag (bb);
2658 
2659   if (! targetm.cannot_modify_jumps_p ())
2660     {
2661       first_pass = true;
2662       /* Attempt to merge blocks as made possible by edge removal.  If
2663 	 a block has only one successor, and the successor has only
2664 	 one predecessor, they may be combined.  */
2665       do
2666 	{
2667 	  block_was_dirty = false;
2668 	  changed = false;
2669 	  iterations++;
2670 
2671 	  if (dump_file)
2672 	    fprintf (dump_file,
2673 		     "\n\ntry_optimize_cfg iteration %i\n\n",
2674 		     iterations);
2675 
2676 	  for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
2677 	       != EXIT_BLOCK_PTR_FOR_FN (cfun);)
2678 	    {
2679 	      basic_block c;
2680 	      edge s;
2681 	      bool changed_here = false;
2682 
2683 	      /* Delete trivially dead basic blocks.  This is either
2684 		 blocks with no predecessors, or empty blocks with no
2685 		 successors.  However if the empty block with no
2686 		 successors is the successor of the ENTRY_BLOCK, it is
2687 		 kept.  This ensures that the ENTRY_BLOCK will have a
2688 		 successor which is a precondition for many RTL
2689 		 passes.  Empty blocks may result from expanding
2690 		 __builtin_unreachable ().  */
2691 	      if (EDGE_COUNT (b->preds) == 0
2692 		  || (EDGE_COUNT (b->succs) == 0
2693 		      && trivially_empty_bb_p (b)
2694 		      && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest
2695 		      != b))
2696 		{
2697 		  c = b->prev_bb;
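     		  /* If B still has predecessors, it must be an empty block with
		     no successors.  If it ends in a barrier, give each fallthru
		     predecessor its own barrier so the insn stream stays
		     properly terminated once B is deleted.  */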
2698 		  if (EDGE_COUNT (b->preds) > 0)
2699 		    {
2700 		      edge e;
2701 		      edge_iterator ei;
2702 
2703 		      if (current_ir_type () == IR_RTL_CFGLAYOUT)
2704 			{
2705 			  if (BB_FOOTER (b)
2706 			      && BARRIER_P (BB_FOOTER (b)))
2707 			    FOR_EACH_EDGE (e, ei, b->preds)
2708 			      if ((e->flags & EDGE_FALLTHRU)
2709 				  && BB_FOOTER (e->src) == NULL)
2710 				{
2711 				  if (BB_FOOTER (b))
2712 				    {
2713 				      BB_FOOTER (e->src) = BB_FOOTER (b);
2714 				      BB_FOOTER (b) = NULL;
2715 				    }
2716 				  else
2717 				    {
2718 				      start_sequence ();
2719 				      BB_FOOTER (e->src) = emit_barrier ();
2720 				      end_sequence ();
2721 				    }
2722 				}
2723 			}
2724 		      else
2725 			{
2726 			  rtx_insn *last = get_last_bb_insn (b);
2727 			  if (last && BARRIER_P (last))
2728 			    FOR_EACH_EDGE (e, ei, b->preds)
2729 			      if ((e->flags & EDGE_FALLTHRU))
2730 				emit_barrier_after (BB_END (e->src));
2731 			}
2732 		    }
2733 		  delete_basic_block (b);
2734 		  changed = true;
2735 		  /* Avoid trying to remove the exit block.  */
2736 		  b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c);
2737 		  continue;
2738 		}
2739 
2740 	      /* Remove code labels no longer used.  */
2741 	      if (single_pred_p (b)
2742 		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2743 		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
2744 		  && LABEL_P (BB_HEAD (b))
2745 		  && !LABEL_PRESERVE_P (BB_HEAD (b))
2746 		  /* If the previous block ends with a branch to this
2747 		     block, we can't delete the label.  Normally this
2748 		     is a condjump that is yet to be simplified, but
2749 		     if CASE_DROPS_THRU, this can be a tablejump with
2750 		     some element going to the same place as the
2751 		     default (fallthru).  */
2752 		  && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun)
2753 		      || !JUMP_P (BB_END (single_pred (b)))
2754 		      || ! label_is_jump_target_p (BB_HEAD (b),
2755 						   BB_END (single_pred (b)))))
2756 		{
2757 		  delete_insn (BB_HEAD (b));
2758 		  if (dump_file)
2759 		    fprintf (dump_file, "Deleted label in block %i.\n",
2760 			     b->index);
2761 		}
2762 
2763 	      /* If we fall through an empty block, we can remove it.  */
2764 	      if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL))
2765 		  && single_pred_p (b)
2766 		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2767 		  && !LABEL_P (BB_HEAD (b))
2768 		  && FORWARDER_BLOCK_P (b)
2769 		  /* Note that forwarder_block_p true ensures that
2770 		     there is a successor for this block.  */
2771 		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
2772 		  && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
2773 		{
2774 		  if (dump_file)
2775 		    fprintf (dump_file,
2776 			     "Deleting fallthru block %i.\n",
2777 			     b->index);
2778 
2779 		  c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2780 		       ? b->next_bb : b->prev_bb);
2781 		  redirect_edge_succ_nodup (single_pred_edge (b),
2782 					    single_succ (b));
2783 		  delete_basic_block (b);
2784 		  changed = true;
2785 		  b = c;
2786 		  continue;
2787 		}
2788 
2789 	      /* Merge B with its single successor, if any.  */
2790 	      if (single_succ_p (b)
2791 		  && (s = single_succ_edge (b))
2792 		  && !(s->flags & EDGE_COMPLEX)
2793 		  && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun)
2794 		  && single_pred_p (c)
2795 		  && b != c)
2796 		{
2797 		  /* When not in cfg_layout mode, use code aware of reordering
2798 		     insns.  This code possibly creates new basic blocks, so it
2799 		     does not fit the merge_blocks interface and is kept here in
2800 		     the hope that it will become useless once more of the
2801 		     compiler is transformed to use cfg_layout mode.  */
2802 
2803 		  if ((mode & CLEANUP_CFGLAYOUT)
2804 		      && can_merge_blocks_p (b, c))
2805 		    {
2806 		      merge_blocks (b, c);
2807 		      update_forwarder_flag (b);
2808 		      changed_here = true;
2809 		    }
2810 		  else if (!(mode & CLEANUP_CFGLAYOUT)
2811 			   /* If the jump insn has side effects,
2812 			      we can't kill the edge.  */
2813 			   && (!JUMP_P (BB_END (b))
2814 			       || (reload_completed
2815 				   ? simplejump_p (BB_END (b))
2816 				   : (onlyjump_p (BB_END (b))
2817 				      && !tablejump_p (BB_END (b),
2818 						       NULL, NULL))))
2819 			   && (next = merge_blocks_move (s, b, c, mode)))
2820 		      {
2821 			b = next;
2822 			changed_here = true;
2823 		      }
2824 		}
2825 
2826 	      /* Try to change a branch to a return to just that return.  */
2827 	      rtx_insn *ret, *use;
2828 	      if (single_succ_p (b)
2829 		  && onlyjump_p (BB_END (b))
2830 		  && bb_is_just_return (single_succ (b), &ret, &use))
2831 		{
2832 		  if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2833 				     PATTERN (ret), 0))
2834 		    {
2835 		      if (use)
2836 			emit_insn_before (copy_insn (PATTERN (use)),
2837 					  BB_END (b));
2838 		      if (dump_file)
2839 			fprintf (dump_file, "Changed jump %d->%d to return.\n",
2840 				 b->index, single_succ (b)->index);
2841 		      redirect_edge_succ (single_succ_edge (b),
2842 					  EXIT_BLOCK_PTR_FOR_FN (cfun));
2843 		      single_succ_edge (b)->flags &= ~EDGE_CROSSING;
2844 		      changed_here = true;
2845 		    }
2846 		}
2847 
2848 	      /* Try to change a conditional branch to a return to the
2849 		 respective conditional return.  */
2850 	      if (EDGE_COUNT (b->succs) == 2
2851 		  && any_condjump_p (BB_END (b))
2852 		  && bb_is_just_return (BRANCH_EDGE (b)->dest, &ret, &use))
2853 		{
2854 		  if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2855 				     PATTERN (ret), 0))
2856 		    {
2857 		      if (use)
2858 			emit_insn_before (copy_insn (PATTERN (use)),
2859 					  BB_END (b));
2860 		      if (dump_file)
2861 			fprintf (dump_file, "Changed conditional jump %d->%d "
2862 				 "to conditional return.\n",
2863 				 b->index, BRANCH_EDGE (b)->dest->index);
2864 		      redirect_edge_succ (BRANCH_EDGE (b),
2865 					  EXIT_BLOCK_PTR_FOR_FN (cfun));
2866 		      BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
2867 		      changed_here = true;
2868 		    }
2869 		}
2870 
2871 	      /* Try to flip a conditional branch that falls through to
2872 		 a return so that it becomes a conditional return and a
2873 		 new jump to the original branch target.  */
2874 	      if (EDGE_COUNT (b->succs) == 2
2875 		  && BRANCH_EDGE (b)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
2876 		  && any_condjump_p (BB_END (b))
2877 		  && bb_is_just_return (FALLTHRU_EDGE (b)->dest, &ret, &use))
2878 		{
2879 		  if (invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2880 				   JUMP_LABEL (BB_END (b)), 0))
2881 		    {
2882 		      basic_block new_ft = BRANCH_EDGE (b)->dest;
2883 		      if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2884 					 PATTERN (ret), 0))
2885 			{
2886 			  if (use)
2887 			    emit_insn_before (copy_insn (PATTERN (use)),
2888 					      BB_END (b));
2889 			  if (dump_file)
2890 			    fprintf (dump_file, "Changed conditional jump "
2891 				     "%d->%d to conditional return, adding "
2892 				     "fall-through jump.\n",
2893 				     b->index, BRANCH_EDGE (b)->dest->index);
2894 			  redirect_edge_succ (BRANCH_EDGE (b),
2895 					      EXIT_BLOCK_PTR_FOR_FN (cfun));
2896 			  BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
2897 			  std::swap (BRANCH_EDGE (b)->probability,
2898 				     FALLTHRU_EDGE (b)->probability);
2899 			  update_br_prob_note (b);
2900 			  basic_block jb = force_nonfallthru (FALLTHRU_EDGE (b));
2901 			  notice_new_block (jb);
2902 			  if (!redirect_jump (as_a <rtx_jump_insn *> (BB_END (jb)),
2903 					      block_label (new_ft), 0))
2904 			    gcc_unreachable ();
2905 			  redirect_edge_succ (single_succ_edge (jb), new_ft);
2906 			  changed_here = true;
2907 			}
2908 		      else
2909 			{
2910 			  /* Invert the jump back to what it was.  This should
2911 			     never fail.  */
2912 			  if (!invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2913 					    JUMP_LABEL (BB_END (b)), 0))
2914 			    gcc_unreachable ();
2915 			}
2916 		    }
2917 		}
2918 
2919 	      /* Simplify branch over branch.  */
2920 	      if ((mode & CLEANUP_EXPENSIVE)
2921 		   && !(mode & CLEANUP_CFGLAYOUT)
2922 		   && try_simplify_condjump (b))
2923 		changed_here = true;
2924 
2925 	      /* If B has a single outgoing edge, but uses a
2926 		 non-trivial jump instruction without side-effects, we
2927 		 can either delete the jump entirely, or replace it
2928 		 with a simple unconditional jump.  */
2929 	      if (single_succ_p (b)
2930 		  && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
2931 		  && onlyjump_p (BB_END (b))
2932 		  && !CROSSING_JUMP_P (BB_END (b))
2933 		  && try_redirect_by_replacing_jump (single_succ_edge (b),
2934 						     single_succ (b),
2935 						     (mode & CLEANUP_CFGLAYOUT) != 0))
2936 		{
2937 		  update_forwarder_flag (b);
2938 		  changed_here = true;
2939 		}
2940 
2941 	      /* Simplify branch to branch.  */
2942 	      if (try_forward_edges (mode, b))
2943 		{
2944 		  update_forwarder_flag (b);
2945 		  changed_here = true;
2946 		}
2947 
2948 	      /* Look for shared code between blocks.  */
2949 	      if ((mode & CLEANUP_CROSSJUMP)
2950 		  && try_crossjump_bb (mode, b))
2951 		changed_here = true;
2952 
2953 	      if ((mode & CLEANUP_CROSSJUMP)
2954 		  /* This can lengthen register lifetimes.  Do it only after
2955 		     reload.  */
2956 		  && reload_completed
2957 		  && try_head_merge_bb (b))
2958 		changed_here = true;
2959 
2960 	      /* Don't get confused by the index shift caused by
2961 		 deleting blocks.  */
2962 	      if (!changed_here)
2963 		b = b->next_bb;
2964 	      else
2965 		changed = true;
2966 	    }
2967 
2968 	  if ((mode & CLEANUP_CROSSJUMP)
2969 	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
2970 	    changed = true;
2971 
2972 	  if (block_was_dirty)
2973 	    {
2974 	      /* This should only be set by head-merging.  */
2975 	      gcc_assert (mode & CLEANUP_CROSSJUMP);
2976 	      df_analyze ();
2977 	    }
2978 
2979 	  if (changed)
2980             {
2981               /* Edge forwarding in particular can cause hot blocks previously
2982                  reached by both hot and cold blocks to become dominated only
2983                  by cold blocks. This will cause the verification below to fail,
2984                  and lead to now cold code in the hot section. This is not easy
2985                  to detect and fix during edge forwarding, and in some cases
2986                  is only visible after newly unreachable blocks are deleted,
2987                  which will be done in fixup_partitions.  */
2988 	      if ((mode & CLEANUP_NO_PARTITIONING) == 0)
2989 		{
2990 		  fixup_partitions ();
2991 	          checking_verify_flow_info ();
2992 		}
2993             }
2994 
2995 	  changed_overall |= changed;
2996 	  first_pass = false;
2997 	}
2998       while (changed);
2999     }
3000 
3001   FOR_ALL_BB_FN (b, cfun)
3002     b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
3003 
3004   return changed_overall;
3005 }
3006 
3007 /* Delete all unreachable basic blocks.  */
3008 
3009 bool
3010 delete_unreachable_blocks (void)
3011 {
3012   bool changed = false;
3013   basic_block b, prev_bb;
3014 
3015   find_unreachable_blocks ();
3016 
3017   /* When we're in GIMPLE mode and there may be debug bind insns, we
3018      should delete blocks in reverse dominator order, so as to get a
3019      chance to substitute all released DEFs into debug bind stmts.  If
3020      we don't have dominators information, walking blocks backward
3021      gets us a better chance of retaining most debug information than
3022      otherwise.  */
3023   if (MAY_HAVE_DEBUG_BIND_INSNS && current_ir_type () == IR_GIMPLE
3024       && dom_info_available_p (CDI_DOMINATORS))
3025     {
3026       for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
3027 	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
3028 	{
3029 	  prev_bb = b->prev_bb;
3030 
3031 	  if (!(b->flags & BB_REACHABLE))
3032 	    {
3033 	      /* Speed up the removal of blocks that don't dominate
3034 		 others.  Walking backwards, this should be the common
3035 		 case.  */
3036 	      if (!first_dom_son (CDI_DOMINATORS, b))
3037 		delete_basic_block (b);
3038 	      else
3039 		{
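     		  /* B dominates other blocks; delete B together with every
		     block it dominates, updating PREV_BB as we go so that the
		     enclosing walk stays valid.  */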
3040 		  vec<basic_block> h
3041 		    = get_all_dominated_blocks (CDI_DOMINATORS, b);
3042 
3043 		  while (h.length ())
3044 		    {
3045 		      b = h.pop ();
3046 
3047 		      prev_bb = b->prev_bb;
3048 
3049 		      gcc_assert (!(b->flags & BB_REACHABLE));
3050 
3051 		      delete_basic_block (b);
3052 		    }
3053 
3054 		  h.release ();
3055 		}
3056 
3057 	      changed = true;
3058 	    }
3059 	}
3060     }
3061   else
3062     {
3063       for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
3064 	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
3065 	{
3066 	  prev_bb = b->prev_bb;
3067 
3068 	  if (!(b->flags & BB_REACHABLE))
3069 	    {
3070 	      delete_basic_block (b);
3071 	      changed = true;
3072 	    }
3073 	}
3074     }
3075 
3076   if (changed)
3077     tidy_fallthru_edges ();
3078   return changed;
3079 }
3080 
3081 /* Delete any jump tables never referenced.  We can't delete them at the
3082    time of removing the tablejump insn as they are referenced by the preceding
3083    insns computing the destination, so we delay deleting them and garbage-collect
3084    them once life information is computed.  */
3085 void
3086 delete_dead_jumptables (void)
3087 {
3088   basic_block bb;
3089 
3090   /* A dead jump table does not belong to any basic block.  Scan insns
3091      between two adjacent basic blocks.  */
3092   FOR_EACH_BB_FN (bb, cfun)
3093     {
3094       rtx_insn *insn, *next;
3095 
3096       for (insn = NEXT_INSN (BB_END (bb));
3097 	   insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
3098 	   insn = next)
3099 	{
3100 	  next = NEXT_INSN (insn);
3101 	  if (LABEL_P (insn)
3102 	      && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
3103 	      && JUMP_TABLE_DATA_P (next))
3104 	    {
3105 	      rtx_insn *label = insn, *jump = next;
3106 
3107 	      if (dump_file)
3108 		fprintf (dump_file, "Dead jumptable %i removed\n",
3109 			 INSN_UID (insn));
3110 
3111 	      next = NEXT_INSN (next);
3112 	      delete_insn (jump);
3113 	      delete_insn (label);
3114 	    }
3115 	}
3116     }
3117 }
3118 
3119 
3120 /* Tidy the CFG by deleting unreachable code and whatnot.  */
3121 
3122 bool
3123 cleanup_cfg (int mode)
3124 {
3125   bool changed = false;
3126 
3127   /* Set the cfglayout mode flag here.  We could update all the callers
3128      but that is just inconvenient, especially given that we eventually
3129      want to have cfglayout mode as the default.  */
3130   if (current_ir_type () == IR_RTL_CFGLAYOUT)
3131     mode |= CLEANUP_CFGLAYOUT;
3132 
3133   timevar_push (TV_CLEANUP_CFG);
3134   if (delete_unreachable_blocks ())
3135     {
3136       changed = true;
3137       /* We've possibly created trivially dead code.  Clean it up right
3138 	 now to introduce more opportunities for try_optimize_cfg.  */
3139       if (!(mode & (CLEANUP_NO_INSN_DEL))
3140 	  && !reload_completed)
3141 	delete_trivially_dead_insns (get_insns (), max_reg_num ());
3142     }
3143 
3144   compact_blocks ();
3145 
3146   /* To tail-merge blocks ending in the same noreturn function (e.g.
3147      a call to abort) we have to insert fake edges to exit.  Do this
3148      here once.  The fake edges do not interfere with any other CFG
3149      cleanups.  */
3150   if (mode & CLEANUP_CROSSJUMP)
3151     add_noreturn_fake_exit_edges ();
3152 
3153   if (!dbg_cnt (cfg_cleanup))
3154     return changed;
3155 
3156   while (try_optimize_cfg (mode))
3157     {
3158       delete_unreachable_blocks (), changed = true;
3159       if (!(mode & CLEANUP_NO_INSN_DEL))
3160 	{
3161 	  /* Try to remove some trivially dead insns when doing an expensive
3162 	     cleanup.  But delete_trivially_dead_insns doesn't work after
3163 	     reload (it only handles pseudos) and run_fast_dce is too costly
3164 	     to run in every iteration.
3165 
3166 	     For effective cross jumping, we really want to run a fast DCE to
3167 	     clean up any dead conditions, or they get in the way of performing
3168 	     useful tail merges.
3169 
3170 	     Other transformations in cleanup_cfg are not so sensitive to dead
3171 	     code, so delete_trivially_dead_insns or even doing nothing at all
3172 	     is good enough.  */
3173 	  if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
3174 	      && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
3175 	    break;
3176 	  if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occurred)
3177 	    run_fast_dce ();
3178 	}
3179       else
3180 	break;
3181     }
3182 
3183   if (mode & CLEANUP_CROSSJUMP)
3184     remove_fake_exit_edges ();
3185 
3186   /* Don't call delete_dead_jumptables in cfglayout mode, because
3187      that function assumes that jump tables are in the insns stream.
3188      But we also don't _have_ to delete dead jumptables in cfglayout
3189      mode because we shouldn't even be looking at things that are
3190      not in a basic block.  Dead jumptables are cleaned up when
3191      going out of cfglayout mode.  */
3192   if (!(mode & CLEANUP_CFGLAYOUT))
3193     delete_dead_jumptables ();
3194 
3195   /* ???  We probably do this way too often.  */
3196   if (current_loops
3197       && (changed
3198 	  || (mode & CLEANUP_CFG_CHANGED)))
3199     {
3200       timevar_push (TV_REPAIR_LOOPS);
3201       /* The above doesn't preserve dominance info if available.  */
3202       gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
3203       calculate_dominance_info (CDI_DOMINATORS);
3204       fix_loop_structure (NULL);
3205       free_dominance_info (CDI_DOMINATORS);
3206       timevar_pop (TV_REPAIR_LOOPS);
3207     }
3208 
3209   timevar_pop (TV_CLEANUP_CFG);
3210 
3211   return changed;
3212 }
3213 
3214 namespace {
3215 
3216 const pass_data pass_data_jump =
3217 {
3218   RTL_PASS, /* type */
3219   "jump", /* name */
3220   OPTGROUP_NONE, /* optinfo_flags */
3221   TV_JUMP, /* tv_id */
3222   0, /* properties_required */
3223   0, /* properties_provided */
3224   0, /* properties_destroyed */
3225   0, /* todo_flags_start */
3226   0, /* todo_flags_finish */
3227 };
3228 
3229 class pass_jump : public rtl_opt_pass
3230 {
3231 public:
3232   pass_jump (gcc::context *ctxt)
3233     : rtl_opt_pass (pass_data_jump, ctxt)
3234   {}
3235 
3236   /* opt_pass methods: */
3237   virtual unsigned int execute (function *);
3238 
3239 }; // class pass_jump
3240 
3241 unsigned int
3242 pass_jump::execute (function *)
3243 {
3244   delete_trivially_dead_insns (get_insns (), max_reg_num ());
3245   if (dump_file)
3246     dump_flow_info (dump_file, dump_flags);
3247   cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
3248 	       | (flag_thread_jumps ? CLEANUP_THREADING : 0));
3249   return 0;
3250 }
3251 
3252 } // anon namespace
3253 
3254 rtl_opt_pass *
3255 make_pass_jump (gcc::context *ctxt)
3256 {
3257   return new pass_jump (ctxt);
3258 }
3259 
3260 namespace {
3261 
3262 const pass_data pass_data_jump2 =
3263 {
3264   RTL_PASS, /* type */
3265   "jump2", /* name */
3266   OPTGROUP_NONE, /* optinfo_flags */
3267   TV_JUMP, /* tv_id */
3268   0, /* properties_required */
3269   0, /* properties_provided */
3270   0, /* properties_destroyed */
3271   0, /* todo_flags_start */
3272   0, /* todo_flags_finish */
3273 };
3274 
3275 class pass_jump2 : public rtl_opt_pass
3276 {
3277 public:
3278   pass_jump2 (gcc::context *ctxt)
3279     : rtl_opt_pass (pass_data_jump2, ctxt)
3280   {}
3281 
3282   /* opt_pass methods: */
3283   virtual unsigned int execute (function *)
3284     {
3285       cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
3286       return 0;
3287     }
3288 
3289 }; // class pass_jump2
3290 
3291 } // anon namespace
3292 
3293 rtl_opt_pass *
3294 make_pass_jump2 (gcc::context *ctxt)
3295 {
3296   return new pass_jump2 (ctxt);
3297 }
3298