xref: /dragonfly/contrib/gcc-8.0/gcc/cfgcleanup.c (revision 58e805e6)
138fd1498Szrj /* Control flow optimization code for GNU compiler.
238fd1498Szrj    Copyright (C) 1987-2018 Free Software Foundation, Inc.
338fd1498Szrj 
438fd1498Szrj This file is part of GCC.
538fd1498Szrj 
638fd1498Szrj GCC is free software; you can redistribute it and/or modify it under
738fd1498Szrj the terms of the GNU General Public License as published by the Free
838fd1498Szrj Software Foundation; either version 3, or (at your option) any later
938fd1498Szrj version.
1038fd1498Szrj 
1138fd1498Szrj GCC is distributed in the hope that it will be useful, but WITHOUT ANY
1238fd1498Szrj WARRANTY; without even the implied warranty of MERCHANTABILITY or
1338fd1498Szrj FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
1438fd1498Szrj for more details.
1538fd1498Szrj 
1638fd1498Szrj You should have received a copy of the GNU General Public License
1738fd1498Szrj along with GCC; see the file COPYING3.  If not see
1838fd1498Szrj <http://www.gnu.org/licenses/>.  */
1938fd1498Szrj 
2038fd1498Szrj /* This file contains optimizer of the control flow.  The main entry point is
2138fd1498Szrj    cleanup_cfg.  Following optimizations are performed:
2238fd1498Szrj 
2338fd1498Szrj    - Unreachable blocks removal
2438fd1498Szrj    - Edge forwarding (edge to the forwarder block is forwarded to its
2538fd1498Szrj      successor.  Simplification of the branch instruction is performed by
2638fd1498Szrj      underlying infrastructure so branch can be converted to simplejump or
2738fd1498Szrj      eliminated).
2838fd1498Szrj    - Cross jumping (tail merging)
2938fd1498Szrj    - Conditional jump-around-simplejump simplification
3038fd1498Szrj    - Basic block merging.  */
3138fd1498Szrj 
3238fd1498Szrj #include "config.h"
3338fd1498Szrj #include "system.h"
3438fd1498Szrj #include "coretypes.h"
3538fd1498Szrj #include "backend.h"
3638fd1498Szrj #include "target.h"
3738fd1498Szrj #include "rtl.h"
3838fd1498Szrj #include "tree.h"
3938fd1498Szrj #include "cfghooks.h"
4038fd1498Szrj #include "df.h"
4138fd1498Szrj #include "memmodel.h"
4238fd1498Szrj #include "tm_p.h"
4338fd1498Szrj #include "insn-config.h"
4438fd1498Szrj #include "emit-rtl.h"
4538fd1498Szrj #include "cselib.h"
4638fd1498Szrj #include "params.h"
4738fd1498Szrj #include "tree-pass.h"
4838fd1498Szrj #include "cfgloop.h"
4938fd1498Szrj #include "cfgrtl.h"
5038fd1498Szrj #include "cfganal.h"
5138fd1498Szrj #include "cfgbuild.h"
5238fd1498Szrj #include "cfgcleanup.h"
5338fd1498Szrj #include "dce.h"
5438fd1498Szrj #include "dbgcnt.h"
5538fd1498Szrj #include "rtl-iter.h"
5638fd1498Szrj 
5738fd1498Szrj #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
5838fd1498Szrj 
5938fd1498Szrj /* Set to true when we are running first pass of try_optimize_cfg loop.  */
6038fd1498Szrj static bool first_pass;
6138fd1498Szrj 
6238fd1498Szrj /* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
6338fd1498Szrj static bool crossjumps_occurred;
6438fd1498Szrj 
6538fd1498Szrj /* Set to true if we couldn't run an optimization due to stale liveness
6638fd1498Szrj    information; we should run df_analyze to enable more opportunities.  */
6738fd1498Szrj static bool block_was_dirty;
6838fd1498Szrj 
6938fd1498Szrj static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
7038fd1498Szrj static bool try_crossjump_bb (int, basic_block);
7138fd1498Szrj static bool outgoing_edges_match (int, basic_block, basic_block);
7238fd1498Szrj static enum replace_direction old_insns_match_p (int, rtx_insn *, rtx_insn *);
7338fd1498Szrj 
7438fd1498Szrj static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
7538fd1498Szrj static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
7638fd1498Szrj static bool try_optimize_cfg (int);
7738fd1498Szrj static bool try_simplify_condjump (basic_block);
7838fd1498Szrj static bool try_forward_edges (int, basic_block);
7938fd1498Szrj static edge thread_jump (edge, basic_block);
8038fd1498Szrj static bool mark_effect (rtx, bitmap);
8138fd1498Szrj static void notice_new_block (basic_block);
8238fd1498Szrj static void update_forwarder_flag (basic_block);
8338fd1498Szrj static void merge_memattrs (rtx, rtx);
8438fd1498Szrj 
8538fd1498Szrj /* Set flags for newly created block.  */
8638fd1498Szrj 
8738fd1498Szrj static void
notice_new_block(basic_block bb)8838fd1498Szrj notice_new_block (basic_block bb)
8938fd1498Szrj {
9038fd1498Szrj   if (!bb)
9138fd1498Szrj     return;
9238fd1498Szrj 
9338fd1498Szrj   if (forwarder_block_p (bb))
9438fd1498Szrj     bb->flags |= BB_FORWARDER_BLOCK;
9538fd1498Szrj }
9638fd1498Szrj 
9738fd1498Szrj /* Recompute forwarder flag after block has been modified.  */
9838fd1498Szrj 
9938fd1498Szrj static void
update_forwarder_flag(basic_block bb)10038fd1498Szrj update_forwarder_flag (basic_block bb)
10138fd1498Szrj {
10238fd1498Szrj   if (forwarder_block_p (bb))
10338fd1498Szrj     bb->flags |= BB_FORWARDER_BLOCK;
10438fd1498Szrj   else
10538fd1498Szrj     bb->flags &= ~BB_FORWARDER_BLOCK;
10638fd1498Szrj }
10738fd1498Szrj 
/* Simplify a conditional jump around an unconditional jump.
   I.e. transform

	if (cond) goto A;	into	if (!cond) goto B;
	goto B;
     A: ...

   when the intervening block is a forwarder.  Return true if
   something changed.  */

static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx_insn *cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  /* Neither destination may be the exit block, and after the transform
     JUMP_BLOCK's position must be able to fall through into the old
     branch target.  */
  if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || jump_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  If inversion fails (e.g. the target
     cannot represent the reversed condition), give up with the insn
     stream unchanged.  */
  if (!invert_jump (as_a <rtx_jump_insn *> (cbranch_insn),
		    block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  /* The old branch edge is now the fallthru edge and vice versa.  */
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}
19138fd1498Szrj 
19238fd1498Szrj /* Attempt to prove that operation is NOOP using CSElib or mark the effect
19338fd1498Szrj    on register.  Used by jump threading.  */
19438fd1498Szrj 
19538fd1498Szrj static bool
mark_effect(rtx exp,regset nonequal)19638fd1498Szrj mark_effect (rtx exp, regset nonequal)
19738fd1498Szrj {
19838fd1498Szrj   rtx dest;
19938fd1498Szrj   switch (GET_CODE (exp))
20038fd1498Szrj     {
20138fd1498Szrj       /* In case we do clobber the register, mark it as equal, as we know the
20238fd1498Szrj 	 value is dead so it don't have to match.  */
20338fd1498Szrj     case CLOBBER:
20438fd1498Szrj       dest = XEXP (exp, 0);
20538fd1498Szrj       if (REG_P (dest))
20638fd1498Szrj 	bitmap_clear_range (nonequal, REGNO (dest), REG_NREGS (dest));
20738fd1498Szrj       return false;
20838fd1498Szrj 
20938fd1498Szrj     case SET:
21038fd1498Szrj       if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
21138fd1498Szrj 	return false;
21238fd1498Szrj       dest = SET_DEST (exp);
21338fd1498Szrj       if (dest == pc_rtx)
21438fd1498Szrj 	return false;
21538fd1498Szrj       if (!REG_P (dest))
21638fd1498Szrj 	return true;
21738fd1498Szrj       bitmap_set_range (nonequal, REGNO (dest), REG_NREGS (dest));
21838fd1498Szrj       return false;
21938fd1498Szrj 
22038fd1498Szrj     default:
22138fd1498Szrj       return false;
22238fd1498Szrj     }
22338fd1498Szrj }
22438fd1498Szrj 
22538fd1498Szrj /* Return true if X contains a register in NONEQUAL.  */
22638fd1498Szrj static bool
mentions_nonequal_regs(const_rtx x,regset nonequal)22738fd1498Szrj mentions_nonequal_regs (const_rtx x, regset nonequal)
22838fd1498Szrj {
22938fd1498Szrj   subrtx_iterator::array_type array;
23038fd1498Szrj   FOR_EACH_SUBRTX (iter, array, x, NONCONST)
23138fd1498Szrj     {
23238fd1498Szrj       const_rtx x = *iter;
23338fd1498Szrj       if (REG_P (x))
23438fd1498Szrj 	{
23538fd1498Szrj 	  unsigned int end_regno = END_REGNO (x);
23638fd1498Szrj 	  for (unsigned int regno = REGNO (x); regno < end_regno; ++regno)
23738fd1498Szrj 	    if (REGNO_REG_SET_P (nonequal, regno))
23838fd1498Szrj 	      return true;
23938fd1498Szrj 	}
24038fd1498Szrj     }
24138fd1498Szrj   return false;
24238fd1498Szrj }
24338fd1498Szrj 
/* Attempt to prove that the basic block B will have no side effects and
   always continues in the same edge if reached via E.  Return the edge
   if exist, NULL otherwise.

   On failure B may be marked BB_NONTHREADABLE_BLOCK so later calls can
   bail out early.  */

static edge
thread_jump (edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2;
  rtx_insn *insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  /* A previous attempt already proved B cannot be threaded.  */
  if (b->flags & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we do handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  /* REVERSE1 becomes true when taking edge E corresponds to the condition
     of the first jump being false, i.e. the condition must be reversed to
     describe what is known on entry to B.  */
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  /* The known-true condition on entry must decide B's condition one way
     or the other.  */
  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we can't
     safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
	b->flags |= BB_NONTHREADABLE_BLOCK;
	return NULL;
      }

  cselib_init (0);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were same basic block.
     Our goal is to prove that whole block is an NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == PARALLEL)
	    {
	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
	    }
	  else
	    failed |= mark_effect (pat, nonequal);
	}

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not equal to the
     former block.  */
  if (mentions_nonequal_regs (cond2, nonequal))
    goto failed_exit;

  /* Any register left in NONEQUAL means B is not provably a no-op.  */
  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  /* Select the outgoing edge of B that the known-true condition forces.  */
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}
38638fd1498Szrj 
/* Attempt to forward edges leaving basic block B, skipping over chains
   of forwarder blocks and (when CLEANUP_THREADING is in MODE) threading
   provably no-op conditional blocks via thread_jump.
   Return true if successful.  */

static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      location_t goto_locus;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;
      bool new_target_threaded = false;

      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we can succeed to forward fallthru
	 edge to the same place as the branch edge of conditional branch
	 and turn conditional branch to an unconditional branch.  */
      if (e->flags & EDGE_COMPLEX)
	{
	  ei_next (&ei);
	  continue;
	}

      target = first = e->dest;
      /* COUNTER bounds the walk; reaching n_basic_blocks_for_fn means we
	 detected a cycle and must give up on this edge.  */
      counter = NUM_FIXED_BLOCKS;
      goto_locus = e->goto_locus;

      while (counter < n_basic_blocks_for_fn (cfun))
	{
	  basic_block new_target = NULL;
	  may_thread |= (target->flags & BB_MODIFIED) != 0;

	  if (FORWARDER_BLOCK_P (target)
	      && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      /* Bypass trivial infinite loops.  */
	      new_target = single_succ (target);
	      if (target == new_target)
		counter = n_basic_blocks_for_fn (cfun);
	      else if (!optimize)
		{
		  /* When not optimizing, ensure that edges or forwarder
		     blocks with different locus are not optimized out.  */
		  location_t new_locus = single_succ_edge (target)->goto_locus;
		  location_t locus = goto_locus;

		  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
		      && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
		      && new_locus != locus)
		    new_target = NULL;
		  else
		    {
		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
			locus = new_locus;

		      /* Also compare against the location of the last real
			 insn in the forwarder block.  */
		      rtx_insn *last = BB_END (target);
		      if (DEBUG_INSN_P (last))
			last = prev_nondebug_insn (last);
		      if (last && INSN_P (last))
			new_locus = INSN_LOCATION (last);
		      else
			new_locus = UNKNOWN_LOCATION;

		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
			  && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
			  && new_locus != locus)
			new_target = NULL;
		      else
			{
			  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
			    locus = new_locus;

			  goto_locus = locus;
			}
		    }
		}
	    }

	  /* Allow to thread only over one edge at time to simplify updating
	     of probabilities.  */
	  else if ((mode & CLEANUP_THREADING) && may_thread)
	    {
	      edge t = thread_jump (e, target);
	      if (t)
		{
		  if (!threaded_edges)
		    threaded_edges = XNEWVEC (edge,
					      n_basic_blocks_for_fn (cfun));
		  else
		    {
		      int i;

		      /* Detect an infinite loop across blocks not
			 including the start block.  */
		      for (i = 0; i < nthreaded_edges; ++i)
			if (threaded_edges[i] == t)
			  break;
		      if (i < nthreaded_edges)
			{
			  counter = n_basic_blocks_for_fn (cfun);
			  break;
			}
		    }

		  /* Detect an infinite loop across the start block.  */
		  if (t->dest == b)
		    break;

		  gcc_assert (nthreaded_edges
			      < (n_basic_blocks_for_fn (cfun)
				 - NUM_FIXED_BLOCKS));
		  threaded_edges[nthreaded_edges++] = t;

		  new_target = t->dest;
		  new_target_threaded = true;
		}
	    }

	  if (!new_target)
	    break;

	  counter++;
	  /* Do not turn non-crossing jump to crossing.  Depending on target
	     it may require different instruction pattern.  */
	  if ((e->flags & EDGE_CROSSING)
	      || BB_PARTITION (first) == BB_PARTITION (new_target))
	    {
	      target = new_target;
	      threaded |= new_target_threaded;
	    }
	}

      if (counter >= n_basic_blocks_for_fn (cfun))
	{
	  if (dump_file)
	    fprintf (dump_file, "Infinite loop in BB %i.\n",
		     target->index);
	}
      else if (target == first)
	; /* We didn't do anything.  */
      else
	{
	  /* Save the values now, as the edge may get removed.  */
	  profile_count edge_count = e->count ();
	  int n = 0;

	  e->goto_locus = goto_locus;

	  /* Don't force if target is exit block.  */
	  if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      notice_new_block (redirect_edge_and_branch_force (e, target));
	      if (dump_file)
		fprintf (dump_file, "Conditionals threaded.\n");
	    }
	  else if (!redirect_edge_and_branch (e, target))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Forwarding edge %i->%i to %i failed.\n",
			 b->index, e->dest->index, target->index);
	      ei_next (&ei);
	      continue;
	    }

	  /* We successfully forwarded the edge.  Now update profile
	     data: for each edge we traversed in the chain, remove
	     the original edge's execution count.  */
	  do
	    {
	      edge t;

	      if (!single_succ_p (first))
		{
		  gcc_assert (n < nthreaded_edges);
		  t = threaded_edges [n++];
		  gcc_assert (t->src == first);
		  update_bb_profile_for_threading (first, edge_count, t);
		  update_br_prob_note (first);
		}
	      else
		{
		  first->count -= edge_count;
		  /* It is possible that as the result of
		     threading we've removed edge as it is
		     threaded to the fallthru edge.  Avoid
		     getting out of sync.  */
		  if (n < nthreaded_edges
		      && first == threaded_edges [n]->src)
		    n++;
		  t = single_succ_edge (first);
		}

	      first = t->dest;
	    }
	  while (first != target);

	  changed = true;
	  continue;
	}
      ei_next (&ei);
    }

  free (threaded_edges);
  return changed;
}
60138fd1498Szrj 
60238fd1498Szrj 
/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx_insn *barrier;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  /* A has no incoming fallthru, so a barrier must follow its final jump;
     it becomes stale once A falls through into B.  */
  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  df_set_bb_dirty (a);

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
	     a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
64638fd1498Szrj 
/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx_insn *barrier, *real_b_end;
  rtx_insn *label;
  rtx_jump_table_data *table;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B temporarily add the jump table
     to block B so that it will also be moved to the correct location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);


  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
	     b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
70038fd1498Szrj 
70138fd1498Szrj /* Attempt to merge basic blocks that are potentially non-adjacent.
70238fd1498Szrj    Return NULL iff the attempt failed, otherwise return basic block
70338fd1498Szrj    where cleanup_cfg should continue.  Because the merging commonly
70438fd1498Szrj    moves basic block away or introduces another optimization
70538fd1498Szrj    possibility, return the basic block just before B so cleanup_cfg
70638fd1498Szrj    doesn't need to iterate.
70738fd1498Szrj 
70838fd1498Szrj    It may be a good idea to return the basic block before C in the case
70938fd1498Szrj    C has been moved after B and originally appeared earlier in the
71038fd1498Szrj    insn sequence, but we have no information available about the
71138fd1498Szrj    relative ordering of these two.  Hopefully it is not too common.  */
71238fd1498Szrj 
71338fd1498Szrj static basic_block
merge_blocks_move(edge e,basic_block b,basic_block c,int mode)71438fd1498Szrj merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
71538fd1498Szrj {
71638fd1498Szrj   basic_block next;
71738fd1498Szrj 
71838fd1498Szrj   /* If we are partitioning hot/cold basic blocks, we don't want to
71938fd1498Szrj      mess up unconditional or indirect jumps that cross between hot
72038fd1498Szrj      and cold sections.
72138fd1498Szrj 
72238fd1498Szrj      Basic block partitioning may result in some jumps that appear to
72338fd1498Szrj      be optimizable (or blocks that appear to be mergeable), but which really
72438fd1498Szrj      must be left untouched (they are required to make it safely across
72538fd1498Szrj      partition boundaries).  See the comments at the top of
72638fd1498Szrj      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
72738fd1498Szrj 
72838fd1498Szrj   if (BB_PARTITION (b) != BB_PARTITION (c))
72938fd1498Szrj     return NULL;
73038fd1498Szrj 
73138fd1498Szrj   /* If B has a fallthru edge to C, no need to move anything.  */
73238fd1498Szrj   if (e->flags & EDGE_FALLTHRU)
73338fd1498Szrj     {
73438fd1498Szrj       int b_index = b->index, c_index = c->index;
73538fd1498Szrj 
73638fd1498Szrj       /* Protect the loop latches.  */
73738fd1498Szrj       if (current_loops && c->loop_father->latch == c)
73838fd1498Szrj 	return NULL;
73938fd1498Szrj 
74038fd1498Szrj       merge_blocks (b, c);
74138fd1498Szrj       update_forwarder_flag (b);
74238fd1498Szrj 
74338fd1498Szrj       if (dump_file)
74438fd1498Szrj 	fprintf (dump_file, "Merged %d and %d without moving.\n",
74538fd1498Szrj 		 b_index, c_index);
74638fd1498Szrj 
      /* Resume cleanup just before the merged block, or at B itself when
	 B is the first real block.  */
74738fd1498Szrj       return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb;
74838fd1498Szrj     }
74938fd1498Szrj 
75038fd1498Szrj   /* Otherwise we will need to move code around.  Do that only if expensive
75138fd1498Szrj      transformations are allowed.  */
75238fd1498Szrj   else if (mode & CLEANUP_EXPENSIVE)
75338fd1498Szrj     {
75438fd1498Szrj       edge tmp_edge, b_fallthru_edge;
75538fd1498Szrj       bool c_has_outgoing_fallthru;
75638fd1498Szrj       bool b_has_incoming_fallthru;
75738fd1498Szrj 
75838fd1498Szrj       /* Avoid overactive code motion, as the forwarder blocks should be
75938fd1498Szrj 	 eliminated by edge redirection instead.  One exception might have
76038fd1498Szrj 	 been if B is a forwarder block and C has no fallthru edge, but
76138fd1498Szrj 	 that should be cleaned up by bb-reorder instead.  */
76238fd1498Szrj       if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
76338fd1498Szrj 	return NULL;
76438fd1498Szrj 
76538fd1498Szrj       /* We must make sure to not munge nesting of lexical blocks,
76638fd1498Szrj 	 and loop notes.  This is done by squeezing out all the notes
76738fd1498Szrj 	 and leaving them there to lie.  Not ideal, but functional.  */
76838fd1498Szrj 
76938fd1498Szrj       tmp_edge = find_fallthru_edge (c->succs);
77038fd1498Szrj       c_has_outgoing_fallthru = (tmp_edge != NULL);
77138fd1498Szrj 
77238fd1498Szrj       tmp_edge = find_fallthru_edge (b->preds);
77338fd1498Szrj       b_has_incoming_fallthru = (tmp_edge != NULL);
77438fd1498Szrj       b_fallthru_edge = tmp_edge;
      /* Compute the block to resume cleanup from: the one before B,
	 skipping C when C is B's immediate predecessor (C is about to
	 be moved away).  */
77538fd1498Szrj       next = b->prev_bb;
77638fd1498Szrj       if (next == c)
77738fd1498Szrj 	next = next->prev_bb;
77838fd1498Szrj 
77938fd1498Szrj       /* Otherwise, we're going to try to move C after B.  If C does
78038fd1498Szrj 	 not have an outgoing fallthru, then it can be moved
78138fd1498Szrj 	 immediately after B without introducing or modifying jumps.  */
78238fd1498Szrj       if (! c_has_outgoing_fallthru)
78338fd1498Szrj 	{
78438fd1498Szrj 	  merge_blocks_move_successor_nojumps (b, c);
78538fd1498Szrj 	  return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
78638fd1498Szrj 	}
78738fd1498Szrj 
78838fd1498Szrj       /* If B does not have an incoming fallthru, then it can be moved
78938fd1498Szrj 	 immediately before C without introducing or modifying jumps.
79038fd1498Szrj 	 C cannot be the first block, so we do not have to worry about
79138fd1498Szrj 	 accessing a non-existent block.  */
79238fd1498Szrj 
79338fd1498Szrj       if (b_has_incoming_fallthru)
79438fd1498Szrj 	{
79538fd1498Szrj 	  basic_block bb;
79638fd1498Szrj 
79738fd1498Szrj 	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
79838fd1498Szrj 	    return NULL;
	  /* An incoming fallthru into B would break when B moves; turn
	     it into an explicit jump first.  */
79938fd1498Szrj 	  bb = force_nonfallthru (b_fallthru_edge);
	  /* force_nonfallthru may have created a new jump block;
	     register it with the cleanup machinery.  */
80038fd1498Szrj 	  if (bb)
80138fd1498Szrj 	    notice_new_block (bb);
80238fd1498Szrj 	}
80338fd1498Szrj 
80438fd1498Szrj       merge_blocks_move_predecessor_nojumps (b, c);
80538fd1498Szrj       return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
80638fd1498Szrj     }
80738fd1498Szrj 
80838fd1498Szrj   return NULL;
80938fd1498Szrj }
81038fd1498Szrj 
81138fd1498Szrj 
81238fd1498Szrj /* Removes the memory attributes of MEM expression
81338fd1498Szrj    if they are not equal.  */
81438fd1498Szrj 
81538fd1498Szrj static void
merge_memattrs(rtx x,rtx y)81638fd1498Szrj merge_memattrs (rtx x, rtx y)
81738fd1498Szrj {
81838fd1498Szrj   int i;
81938fd1498Szrj   int j;
82038fd1498Szrj   enum rtx_code code;
82138fd1498Szrj   const char *fmt;
82238fd1498Szrj 
  /* Identical or missing rtxes: nothing to merge.  */
82338fd1498Szrj   if (x == y)
82438fd1498Szrj     return;
82538fd1498Szrj   if (x == 0 || y == 0)
82638fd1498Szrj     return;
82738fd1498Szrj 
82838fd1498Szrj   code = GET_CODE (x);
82938fd1498Szrj 
83038fd1498Szrj   if (code != GET_CODE (y))
83138fd1498Szrj     return;
83238fd1498Szrj 
83338fd1498Szrj   if (GET_MODE (x) != GET_MODE (y))
83438fd1498Szrj     return;
83538fd1498Szrj 
83638fd1498Szrj   if (code == MEM && !mem_attrs_eq_p (MEM_ATTRS (x), MEM_ATTRS (y)))
83738fd1498Szrj     {
      /* If either MEM carries no attributes, the merged result is no
	 attributes on both sides.  */
83838fd1498Szrj       if (! MEM_ATTRS (x))
83938fd1498Szrj 	MEM_ATTRS (y) = 0;
84038fd1498Szrj       else if (! MEM_ATTRS (y))
84138fd1498Szrj 	MEM_ATTRS (x) = 0;
84238fd1498Szrj       else
84338fd1498Szrj 	{
	  /* Both have attributes: keep only what the two agree on,
	     clearing each field where they differ.  */
84438fd1498Szrj 	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
84538fd1498Szrj 	    {
84638fd1498Szrj 	      set_mem_alias_set (x, 0);
84738fd1498Szrj 	      set_mem_alias_set (y, 0);
84838fd1498Szrj 	    }
84938fd1498Szrj 
85038fd1498Szrj 	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
85138fd1498Szrj 	    {
85238fd1498Szrj 	      set_mem_expr (x, 0);
85338fd1498Szrj 	      set_mem_expr (y, 0);
85438fd1498Szrj 	      clear_mem_offset (x);
85538fd1498Szrj 	      clear_mem_offset (y);
85638fd1498Szrj 	    }
85738fd1498Szrj 	  else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
85838fd1498Szrj 		   || (MEM_OFFSET_KNOWN_P (x)
85938fd1498Szrj 		       && maybe_ne (MEM_OFFSET (x), MEM_OFFSET (y))))
86038fd1498Szrj 	    {
86138fd1498Szrj 	      clear_mem_offset (x);
86238fd1498Szrj 	      clear_mem_offset (y);
86338fd1498Szrj 	    }
86438fd1498Szrj 
	  /* When both sizes are known and comparable, both MEMs get the
	     larger of the two sizes.  */
86538fd1498Szrj 	  if (!MEM_SIZE_KNOWN_P (x))
86638fd1498Szrj 	    clear_mem_size (y);
86738fd1498Szrj 	  else if (!MEM_SIZE_KNOWN_P (y))
86838fd1498Szrj 	    clear_mem_size (x);
86938fd1498Szrj 	  else if (known_le (MEM_SIZE (x), MEM_SIZE (y)))
87038fd1498Szrj 	    set_mem_size (x, MEM_SIZE (y));
87138fd1498Szrj 	  else if (known_le (MEM_SIZE (y), MEM_SIZE (x)))
87238fd1498Szrj 	    set_mem_size (y, MEM_SIZE (x));
87338fd1498Szrj 	  else
87438fd1498Szrj 	    {
87538fd1498Szrj 	      /* The sizes aren't ordered, so we can't merge them.  */
87638fd1498Szrj 	      clear_mem_size (x);
87738fd1498Szrj 	      clear_mem_size (y);
87838fd1498Szrj 	    }
87938fd1498Szrj 
	  /* Alignment merges to the smaller (weaker) guarantee.  */
88038fd1498Szrj 	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
88138fd1498Szrj 	  set_mem_align (y, MEM_ALIGN (x));
88238fd1498Szrj 	}
88338fd1498Szrj     }
88438fd1498Szrj   if (code == MEM)
88538fd1498Szrj     {
      /* Flag bits: READONLY and NOTRAP merge to 0, VOLATILE merges
	 to 1 -- each time the conservative combination.  */
88638fd1498Szrj       if (MEM_READONLY_P (x) != MEM_READONLY_P (y))
88738fd1498Szrj 	{
88838fd1498Szrj 	  MEM_READONLY_P (x) = 0;
88938fd1498Szrj 	  MEM_READONLY_P (y) = 0;
89038fd1498Szrj 	}
89138fd1498Szrj       if (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y))
89238fd1498Szrj 	{
89338fd1498Szrj 	  MEM_NOTRAP_P (x) = 0;
89438fd1498Szrj 	  MEM_NOTRAP_P (y) = 0;
89538fd1498Szrj 	}
89638fd1498Szrj       if (MEM_VOLATILE_P (x) != MEM_VOLATILE_P (y))
89738fd1498Szrj 	{
89838fd1498Szrj 	  MEM_VOLATILE_P (x) = 1;
89938fd1498Szrj 	  MEM_VOLATILE_P (y) = 1;
90038fd1498Szrj 	}
90138fd1498Szrj     }
90238fd1498Szrj 
  /* Recurse over the operands so nested MEMs get merged too.  */
90338fd1498Szrj   fmt = GET_RTX_FORMAT (code);
90438fd1498Szrj   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
90538fd1498Szrj     {
90638fd1498Szrj       switch (fmt[i])
90738fd1498Szrj 	{
90838fd1498Szrj 	case 'E':
90938fd1498Szrj 	  /* Two vectors must have the same length.  */
91038fd1498Szrj 	  if (XVECLEN (x, i) != XVECLEN (y, i))
91138fd1498Szrj 	    return;
91238fd1498Szrj 
91338fd1498Szrj 	  for (j = 0; j < XVECLEN (x, i); j++)
91438fd1498Szrj 	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
91538fd1498Szrj 
91638fd1498Szrj 	  break;
91738fd1498Szrj 
91838fd1498Szrj 	case 'e':
91938fd1498Szrj 	  merge_memattrs (XEXP (x, i), XEXP (y, i));
92038fd1498Szrj 	}
92138fd1498Szrj     }
92238fd1498Szrj   return;
92338fd1498Szrj }
92438fd1498Szrj 
92538fd1498Szrj 
92638fd1498Szrj  /* Checks if patterns P1 and P2 are equivalent, apart from the possibly
92738fd1498Szrj     different single sets S1 and S2.  */
92838fd1498Szrj 
92938fd1498Szrj static bool
equal_different_set_p(rtx p1,rtx s1,rtx p2,rtx s2)93038fd1498Szrj equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
93138fd1498Szrj {
93238fd1498Szrj   int i;
93338fd1498Szrj   rtx e1, e2;
93438fd1498Szrj 
93538fd1498Szrj   if (p1 == s1 && p2 == s2)
93638fd1498Szrj     return true;
93738fd1498Szrj 
93838fd1498Szrj   if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
93938fd1498Szrj     return false;
94038fd1498Szrj 
94138fd1498Szrj   if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
94238fd1498Szrj     return false;
94338fd1498Szrj 
94438fd1498Szrj   for (i = 0; i < XVECLEN (p1, 0); i++)
94538fd1498Szrj     {
94638fd1498Szrj       e1 = XVECEXP (p1, 0, i);
94738fd1498Szrj       e2 = XVECEXP (p2, 0, i);
94838fd1498Szrj       if (e1 == s1 && e2 == s2)
94938fd1498Szrj         continue;
95038fd1498Szrj       if (reload_completed
95138fd1498Szrj           ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
95238fd1498Szrj         continue;
95338fd1498Szrj 
95438fd1498Szrj       return false;
95538fd1498Szrj     }
95638fd1498Szrj 
95738fd1498Szrj   return true;
95838fd1498Szrj }
95938fd1498Szrj 
96038fd1498Szrj 
96138fd1498Szrj /* NOTE1 is the REG_EQUAL note, if any, attached to an insn
96238fd1498Szrj    that is a single_set with a SET_SRC of SRC1.  Similarly
96338fd1498Szrj    for NOTE2/SRC2.
96438fd1498Szrj 
96538fd1498Szrj    So effectively NOTE1/NOTE2 are an alternate form of
96638fd1498Szrj    SRC1/SRC2 respectively.
96738fd1498Szrj 
96838fd1498Szrj    Return nonzero if SRC1 or NOTE1 has the same constant
96938fd1498Szrj    integer value as SRC2 or NOTE2.   Else return zero.  */
97038fd1498Szrj static int
values_equal_p(rtx note1,rtx note2,rtx src1,rtx src2)97138fd1498Szrj values_equal_p (rtx note1, rtx note2, rtx src1, rtx src2)
97238fd1498Szrj {
97338fd1498Szrj   if (note1
97438fd1498Szrj       && note2
97538fd1498Szrj       && CONST_INT_P (XEXP (note1, 0))
97638fd1498Szrj       && rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0)))
97738fd1498Szrj     return 1;
97838fd1498Szrj 
97938fd1498Szrj   if (!note1
98038fd1498Szrj       && !note2
98138fd1498Szrj       && CONST_INT_P (src1)
98238fd1498Szrj       && CONST_INT_P (src2)
98338fd1498Szrj       && rtx_equal_p (src1, src2))
98438fd1498Szrj     return 1;
98538fd1498Szrj 
98638fd1498Szrj   if (note1
98738fd1498Szrj       && CONST_INT_P (src2)
98838fd1498Szrj       && rtx_equal_p (XEXP (note1, 0), src2))
98938fd1498Szrj     return 1;
99038fd1498Szrj 
99138fd1498Szrj   if (note2
99238fd1498Szrj       && CONST_INT_P (src1)
99338fd1498Szrj       && rtx_equal_p (XEXP (note2, 0), src1))
99438fd1498Szrj     return 1;
99538fd1498Szrj 
99638fd1498Szrj   return 0;
99738fd1498Szrj }
99838fd1498Szrj 
99938fd1498Szrj /* Examine register notes on I1 and I2 and return:
100038fd1498Szrj    - dir_forward if I1 can be replaced by I2, or
100138fd1498Szrj    - dir_backward if I2 can be replaced by I1, or
100238fd1498Szrj    - dir_both if both are the case.  */
100338fd1498Szrj 
100438fd1498Szrj static enum replace_direction
can_replace_by(rtx_insn * i1,rtx_insn * i2)100538fd1498Szrj can_replace_by (rtx_insn *i1, rtx_insn *i2)
100638fd1498Szrj {
100738fd1498Szrj   rtx s1, s2, d1, d2, src1, src2, note1, note2;
100838fd1498Szrj   bool c1, c2;
100938fd1498Szrj 
101038fd1498Szrj   /* Check for 2 sets.  Only single-set insns are replacement
     candidates.  */
101138fd1498Szrj   s1 = single_set (i1);
101238fd1498Szrj   s2 = single_set (i2);
101338fd1498Szrj   if (s1 == NULL_RTX || s2 == NULL_RTX)
101438fd1498Szrj     return dir_none;
101538fd1498Szrj 
101638fd1498Szrj   /* Check that the 2 sets set the same dest.  */
101738fd1498Szrj   d1 = SET_DEST (s1);
101838fd1498Szrj   d2 = SET_DEST (s2);
101938fd1498Szrj   if (!(reload_completed
102038fd1498Szrj         ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
102138fd1498Szrj     return dir_none;
102238fd1498Szrj 
102338fd1498Szrj   /* Find identical req_equiv or reg_equal note, which implies that the 2 sets
102438fd1498Szrj      set dest to the same value.  */
102538fd1498Szrj   note1 = find_reg_equal_equiv_note (i1);
102638fd1498Szrj   note2 = find_reg_equal_equiv_note (i2);
102738fd1498Szrj 
102838fd1498Szrj   src1 = SET_SRC (s1);
102938fd1498Szrj   src2 = SET_SRC (s2);
103038fd1498Szrj 
  /* The sources, or their REG_EQUAL/REG_EQUIV notes, must prove both
     insns store the same constant value into dest.  */
103138fd1498Szrj   if (!values_equal_p (note1, note2, src1, src2))
103238fd1498Szrj     return dir_none;
103338fd1498Szrj 
  /* Any surrounding PARALLEL elements (besides the sets) must match
     as well.  */
103438fd1498Szrj   if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
103538fd1498Szrj     return dir_none;
103638fd1498Szrj 
103738fd1498Szrj   /* Although the 2 sets set dest to the same value, we cannot replace
103838fd1498Szrj        (set (dest) (const_int))
103938fd1498Szrj      by
104038fd1498Szrj        (set (dest) (reg))
104138fd1498Szrj      because we don't know if the reg is live and has the same value at the
104238fd1498Szrj      location of replacement.  */
104338fd1498Szrj   c1 = CONST_INT_P (src1);
104438fd1498Szrj   c2 = CONST_INT_P (src2);
104538fd1498Szrj   if (c1 && c2)
104638fd1498Szrj     return dir_both;
104738fd1498Szrj   else if (c2)
104838fd1498Szrj     return dir_forward;
104938fd1498Szrj   else if (c1)
105038fd1498Szrj     return dir_backward;
105138fd1498Szrj 
105238fd1498Szrj   return dir_none;
105338fd1498Szrj }
105438fd1498Szrj 
105538fd1498Szrj /* Merges directions A and B.  */
105638fd1498Szrj 
105738fd1498Szrj static enum replace_direction
merge_dir(enum replace_direction a,enum replace_direction b)105838fd1498Szrj merge_dir (enum replace_direction a, enum replace_direction b)
105938fd1498Szrj {
106038fd1498Szrj   /* Implements the following table:
106138fd1498Szrj         |bo fw bw no
106238fd1498Szrj      ---+-----------
106338fd1498Szrj      bo |bo fw bw no
106438fd1498Szrj      fw |-- fw no no
106538fd1498Szrj      bw |-- -- bw no
106638fd1498Szrj      no |-- -- -- no.  */
106738fd1498Szrj 
106838fd1498Szrj   if (a == b)
106938fd1498Szrj     return a;
107038fd1498Szrj 
107138fd1498Szrj   if (a == dir_both)
107238fd1498Szrj     return b;
107338fd1498Szrj   if (b == dir_both)
107438fd1498Szrj     return a;
107538fd1498Szrj 
107638fd1498Szrj   return dir_none;
107738fd1498Szrj }
107838fd1498Szrj 
107938fd1498Szrj /* Array of flags indexed by reg note kind, true if the given
108038fd1498Szrj    reg note is CFA related.  */
108138fd1498Szrj static const bool reg_note_cfa_p[] = {
108238fd1498Szrj #undef REG_CFA_NOTE
  /* Expand reg-notes.def: each note kind contributes "false" unless it
     is declared via REG_CFA_NOTE, which contributes "true".  */
108338fd1498Szrj #define DEF_REG_NOTE(NAME) false,
108438fd1498Szrj #define REG_CFA_NOTE(NAME) true,
108538fd1498Szrj #include "reg-notes.def"
108638fd1498Szrj #undef REG_CFA_NOTE
108738fd1498Szrj #undef DEF_REG_NOTE
  /* Trailing sentinel entry; presumably covers the one-past-the-last
     note kind -- TODO confirm against reg-notes.def.  */
108838fd1498Szrj   false
108938fd1498Szrj };
109038fd1498Szrj 
109138fd1498Szrj /* Return true if I1 and I2 have identical CFA notes (the same order
109238fd1498Szrj    and equivalent content).  */
109338fd1498Szrj 
109438fd1498Szrj static bool
insns_have_identical_cfa_notes(rtx_insn * i1,rtx_insn * i2)109538fd1498Szrj insns_have_identical_cfa_notes (rtx_insn *i1, rtx_insn *i2)
109638fd1498Szrj {
109738fd1498Szrj   rtx n1, n2;
  /* Walk both note lists in lock-step, considering only the notes that
     reg_note_cfa_p marks as CFA-related.  */
109838fd1498Szrj   for (n1 = REG_NOTES (i1), n2 = REG_NOTES (i2); ;
109938fd1498Szrj        n1 = XEXP (n1, 1), n2 = XEXP (n2, 1))
110038fd1498Szrj     {
110138fd1498Szrj       /* Skip over reg notes not related to CFI information.  */
110238fd1498Szrj       while (n1 && !reg_note_cfa_p[REG_NOTE_KIND (n1)])
110338fd1498Szrj 	n1 = XEXP (n1, 1);
110438fd1498Szrj       while (n2 && !reg_note_cfa_p[REG_NOTE_KIND (n2)])
110538fd1498Szrj 	n2 = XEXP (n2, 1);
      /* Both lists exhausted together: every CFA note matched.  */
110638fd1498Szrj       if (n1 == NULL_RTX && n2 == NULL_RTX)
110738fd1498Szrj 	return true;
      /* One list ran out first: differing numbers of CFA notes.  */
110838fd1498Szrj       if (n1 == NULL_RTX || n2 == NULL_RTX)
110938fd1498Szrj 	return false;
      /* Identical rtx pointers are trivially equal; otherwise compare
	 structurally (modulo register renumbering after reload).  */
111038fd1498Szrj       if (XEXP (n1, 0) == XEXP (n2, 0))
111138fd1498Szrj 	;
111238fd1498Szrj       else if (XEXP (n1, 0) == NULL_RTX || XEXP (n2, 0) == NULL_RTX)
111338fd1498Szrj 	return false;
111438fd1498Szrj       else if (!(reload_completed
111538fd1498Szrj 		 ? rtx_renumbered_equal_p (XEXP (n1, 0), XEXP (n2, 0))
111638fd1498Szrj 		 : rtx_equal_p (XEXP (n1, 0), XEXP (n2, 0))))
111738fd1498Szrj 	return false;
111838fd1498Szrj     }
111938fd1498Szrj }
112038fd1498Szrj 
112138fd1498Szrj /* Examine I1 and I2 and return:
112238fd1498Szrj    - dir_forward if I1 can be replaced by I2, or
112338fd1498Szrj    - dir_backward if I2 can be replaced by I1, or
112438fd1498Szrj    - dir_both if both are the case.  */
112538fd1498Szrj 
112638fd1498Szrj static enum replace_direction
old_insns_match_p(int mode ATTRIBUTE_UNUSED,rtx_insn * i1,rtx_insn * i2)112738fd1498Szrj old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx_insn *i1, rtx_insn *i2)
112838fd1498Szrj {
112938fd1498Szrj   rtx p1, p2;
113038fd1498Szrj 
113138fd1498Szrj   /* Verify that I1 and I2 are equivalent.  */
113238fd1498Szrj   if (GET_CODE (i1) != GET_CODE (i2))
113338fd1498Szrj     return dir_none;
113438fd1498Szrj 
113538fd1498Szrj   /* __builtin_unreachable() may lead to empty blocks (ending with
113638fd1498Szrj      NOTE_INSN_BASIC_BLOCK).  They may be crossjumped. */
113738fd1498Szrj   if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
113838fd1498Szrj     return dir_both;
113938fd1498Szrj 
114038fd1498Szrj   /* ??? Do not allow cross-jumping between different stack levels.  */
114138fd1498Szrj   p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
114238fd1498Szrj   p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
114338fd1498Szrj   if (p1 && p2)
114438fd1498Szrj     {
114538fd1498Szrj       p1 = XEXP (p1, 0);
114638fd1498Szrj       p2 = XEXP (p2, 0);
114738fd1498Szrj       if (!rtx_equal_p (p1, p2))
114838fd1498Szrj         return dir_none;
114938fd1498Szrj 
115038fd1498Szrj       /* ??? Worse, this adjustment had better be constant lest we
115138fd1498Szrj          have differing incoming stack levels.
	 NOTE(review): HOST_WIDE_INT_MIN looks like the sentinel
	 find_args_size_adjust returns for a non-constant adjustment --
	 confirm against rtlanal.c.  */
115238fd1498Szrj       if (!frame_pointer_needed
115338fd1498Szrj 	  && known_eq (find_args_size_adjust (i1), HOST_WIDE_INT_MIN))
115438fd1498Szrj 	return dir_none;
115538fd1498Szrj     }
  /* Only one insn has a REG_ARGS_SIZE note: stack levels differ.  */
115638fd1498Szrj   else if (p1 || p2)
115738fd1498Szrj     return dir_none;
115838fd1498Szrj 
115938fd1498Szrj   /* Do not allow cross-jumping between frame related insns and other
116038fd1498Szrj      insns.  */
116138fd1498Szrj   if (RTX_FRAME_RELATED_P (i1) != RTX_FRAME_RELATED_P (i2))
116238fd1498Szrj     return dir_none;
116338fd1498Szrj 
116438fd1498Szrj   p1 = PATTERN (i1);
116538fd1498Szrj   p2 = PATTERN (i2);
116638fd1498Szrj 
116738fd1498Szrj   if (GET_CODE (p1) != GET_CODE (p2))
116838fd1498Szrj     return dir_none;
116938fd1498Szrj 
117038fd1498Szrj   /* If this is a CALL_INSN, compare register usage information.
117138fd1498Szrj      If we don't check this on stack register machines, the two
117238fd1498Szrj      CALL_INSNs might be merged leaving reg-stack.c with mismatching
117338fd1498Szrj      numbers of stack registers in the same basic block.
117438fd1498Szrj      If we don't check this on machines with delay slots, a delay slot may
117538fd1498Szrj      be filled that clobbers a parameter expected by the subroutine.
117638fd1498Szrj 
117738fd1498Szrj      ??? We take the simple route for now and assume that if they're
117838fd1498Szrj      equal, they were constructed identically.
117938fd1498Szrj 
118038fd1498Szrj      Also check for identical exception regions.  */
118138fd1498Szrj 
118238fd1498Szrj   if (CALL_P (i1))
118338fd1498Szrj     {
118438fd1498Szrj       /* Ensure the same EH region.  */
118538fd1498Szrj       rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
118638fd1498Szrj       rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);
118738fd1498Szrj 
118838fd1498Szrj       if (!n1 && n2)
118938fd1498Szrj 	return dir_none;
119038fd1498Szrj 
119138fd1498Szrj       if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
119238fd1498Szrj 	return dir_none;
119338fd1498Szrj 
119438fd1498Szrj       if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
119538fd1498Szrj 			CALL_INSN_FUNCTION_USAGE (i2))
119638fd1498Szrj 	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
119738fd1498Szrj 	return dir_none;
119838fd1498Szrj 
119938fd1498Szrj       /* For address sanitizer, never crossjump __asan_report_* builtins,
120038fd1498Szrj 	 otherwise errors might be reported on incorrect lines.  */
120138fd1498Szrj       if (flag_sanitize & SANITIZE_ADDRESS)
120238fd1498Szrj 	{
120338fd1498Szrj 	  rtx call = get_call_rtx_from (i1);
120438fd1498Szrj 	  if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
120538fd1498Szrj 	    {
120638fd1498Szrj 	      rtx symbol = XEXP (XEXP (call, 0), 0);
120738fd1498Szrj 	      if (SYMBOL_REF_DECL (symbol)
120838fd1498Szrj 		  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
120938fd1498Szrj 		{
		  /* Reject calls to builtins in the range
		     BUILT_IN_ASAN_REPORT_LOAD1 .. BUILT_IN_ASAN_STOREN.  */
121038fd1498Szrj 		  if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
121138fd1498Szrj 		       == BUILT_IN_NORMAL)
121238fd1498Szrj 		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
121338fd1498Szrj 			 >= BUILT_IN_ASAN_REPORT_LOAD1
121438fd1498Szrj 		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
121538fd1498Szrj 			 <= BUILT_IN_ASAN_STOREN)
121638fd1498Szrj 		    return dir_none;
121738fd1498Szrj 		}
121838fd1498Szrj 	    }
121938fd1498Szrj 	}
122038fd1498Szrj     }
122138fd1498Szrj 
122238fd1498Szrj   /* If both i1 and i2 are frame related, verify all the CFA notes
122338fd1498Szrj      in the same order and with the same content.  */
122438fd1498Szrj   if (RTX_FRAME_RELATED_P (i1) && !insns_have_identical_cfa_notes (i1, i2))
122538fd1498Szrj     return dir_none;
122638fd1498Szrj 
122738fd1498Szrj #ifdef STACK_REGS
122838fd1498Szrj   /* If cross_jump_death_matters is not 0, the insn's mode
122938fd1498Szrj      indicates whether or not the insn contains any stack-like
123038fd1498Szrj      regs.  */
123138fd1498Szrj 
123238fd1498Szrj   if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
123338fd1498Szrj     {
123438fd1498Szrj       /* If register stack conversion has already been done, then
123538fd1498Szrj 	 death notes must also be compared before it is certain that
123638fd1498Szrj 	 the two instruction streams match.  */
123738fd1498Szrj 
123838fd1498Szrj       rtx note;
123938fd1498Szrj       HARD_REG_SET i1_regset, i2_regset;
124038fd1498Szrj 
124138fd1498Szrj       CLEAR_HARD_REG_SET (i1_regset);
124238fd1498Szrj       CLEAR_HARD_REG_SET (i2_regset);
124338fd1498Szrj 
      /* Collect the sets of stack registers dying at each insn.  */
124438fd1498Szrj       for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
124538fd1498Szrj 	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
124638fd1498Szrj 	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
124738fd1498Szrj 
124838fd1498Szrj       for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
124938fd1498Szrj 	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
125038fd1498Szrj 	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)))
;
125138fd1498Szrj 
125238fd1498Szrj       if (!hard_reg_set_equal_p (i1_regset, i2_regset))
125338fd1498Szrj 	return dir_none;
125438fd1498Szrj     }
125538fd1498Szrj #endif
125638fd1498Szrj 
  /* Patterns identical (modulo hard-register renumbering after reload)
     can replace each other in both directions; otherwise see whether
     one single-set insn can stand in for the other.  */
125738fd1498Szrj   if (reload_completed
125838fd1498Szrj       ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
125938fd1498Szrj     return dir_both;
126038fd1498Szrj 
126138fd1498Szrj   return can_replace_by (i1, i2);
126238fd1498Szrj }
126338fd1498Szrj 
126438fd1498Szrj /* When comparing insns I1 and I2 in flow_find_cross_jump or
126538fd1498Szrj    flow_find_head_matching_sequence, ensure the notes match.  */
126638fd1498Szrj 
126738fd1498Szrj static void
merge_notes(rtx_insn * i1,rtx_insn * i2)126838fd1498Szrj merge_notes (rtx_insn *i1, rtx_insn *i2)
126938fd1498Szrj {
127038fd1498Szrj   /* If the merged insns have different REG_EQUAL notes, then
127138fd1498Szrj      remove them.  */
127238fd1498Szrj   rtx equiv1 = find_reg_equal_equiv_note (i1);
127338fd1498Szrj   rtx equiv2 = find_reg_equal_equiv_note (i2);
127438fd1498Szrj 
127538fd1498Szrj   if (equiv1 && !equiv2)
127638fd1498Szrj     remove_note (i1, equiv1);
127738fd1498Szrj   else if (!equiv1 && equiv2)
127838fd1498Szrj     remove_note (i2, equiv2);
127938fd1498Szrj   else if (equiv1 && equiv2
128038fd1498Szrj 	   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
128138fd1498Szrj     {
128238fd1498Szrj       remove_note (i1, equiv1);
128338fd1498Szrj       remove_note (i2, equiv2);
128438fd1498Szrj     }
128538fd1498Szrj }
128638fd1498Szrj 
128738fd1498Szrj  /* Walks from I1 in BB1 backward till the next non-debug insn, and returns the
128838fd1498Szrj     resulting insn in I1, and the corresponding bb in BB1.  At the head of a
128938fd1498Szrj     bb, if there is a predecessor bb that reaches this bb via fallthru, and
129038fd1498Szrj     FOLLOW_FALLTHRU, walks further in the predecessor bb and registers this in
129138fd1498Szrj     DID_FALLTHRU.  Otherwise, stops at the head of the bb.  */
129238fd1498Szrj 
129338fd1498Szrj static void
walk_to_nondebug_insn(rtx_insn ** i1,basic_block * bb1,bool follow_fallthru,bool * did_fallthru)129438fd1498Szrj walk_to_nondebug_insn (rtx_insn **i1, basic_block *bb1, bool follow_fallthru,
129538fd1498Szrj                        bool *did_fallthru)
129638fd1498Szrj {
129738fd1498Szrj   edge fallthru;
129838fd1498Szrj 
129938fd1498Szrj   *did_fallthru = false;
130038fd1498Szrj 
130138fd1498Szrj   /* Ignore notes.  */
130238fd1498Szrj   while (!NONDEBUG_INSN_P (*i1))
130338fd1498Szrj     {
      /* Still inside the block: just step backwards.  */
130438fd1498Szrj       if (*i1 != BB_HEAD (*bb1))
130538fd1498Szrj         {
130638fd1498Szrj           *i1 = PREV_INSN (*i1);
130738fd1498Szrj           continue;
130838fd1498Szrj         }
130938fd1498Szrj 
      /* At the head of the block: stop here unless the caller asked us
	 to continue into a fallthru predecessor.  */
131038fd1498Szrj       if (!follow_fallthru)
131138fd1498Szrj         return;
131238fd1498Szrj 
      /* Only continue into a real predecessor block that falls through
	 here and has no other successors.  */
131338fd1498Szrj       fallthru = find_fallthru_edge ((*bb1)->preds);
131438fd1498Szrj       if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
131538fd1498Szrj           || !single_succ_p (fallthru->src))
131638fd1498Szrj         return;
131738fd1498Szrj 
131838fd1498Szrj       *bb1 = fallthru->src;
131938fd1498Szrj       *i1 = BB_END (*bb1);
132038fd1498Szrj       *did_fallthru = true;
132138fd1498Szrj      }
132238fd1498Szrj }
132338fd1498Szrj 
132438fd1498Szrj /* Look through the insns at the end of BB1 and BB2 and find the longest
132538fd1498Szrj    sequence that are either equivalent, or allow forward or backward
132638fd1498Szrj    replacement.  Store the first insns for that sequence in *F1 and *F2 and
132738fd1498Szrj    return the sequence length.
132838fd1498Szrj 
132938fd1498Szrj    DIR_P indicates the allowed replacement direction on function entry, and
133038fd1498Szrj    the actual replacement direction on function exit.  If NULL, only equivalent
133138fd1498Szrj    sequences are allowed.
133238fd1498Szrj 
133338fd1498Szrj    To simplify callers of this function, if the blocks match exactly,
133438fd1498Szrj    store the head of the blocks in *F1 and *F2.  */
133538fd1498Szrj 
133638fd1498Szrj int
flow_find_cross_jump(basic_block bb1,basic_block bb2,rtx_insn ** f1,rtx_insn ** f2,enum replace_direction * dir_p)133738fd1498Szrj flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx_insn **f1,
133838fd1498Szrj 		      rtx_insn **f2, enum replace_direction *dir_p)
133938fd1498Szrj {
134038fd1498Szrj   rtx_insn *i1, *i2, *last1, *last2, *afterlast1, *afterlast2;
134138fd1498Szrj   int ninsns = 0;
134238fd1498Szrj   enum replace_direction dir, last_dir, afterlast_dir;
134338fd1498Szrj   bool follow_fallthru, did_fallthru;
134438fd1498Szrj 
134538fd1498Szrj   if (dir_p)
134638fd1498Szrj     dir = *dir_p;
134738fd1498Szrj   else
134838fd1498Szrj     dir = dir_both;
134938fd1498Szrj   afterlast_dir = dir;
135038fd1498Szrj   last_dir = afterlast_dir;
135138fd1498Szrj 
135238fd1498Szrj   /* Skip simple jumps at the end of the blocks.  Complex jumps still
135338fd1498Szrj      need to be compared for equivalence, which we'll do below.  */
135438fd1498Szrj 
135538fd1498Szrj   i1 = BB_END (bb1);
135638fd1498Szrj   last1 = afterlast1 = last2 = afterlast2 = NULL;
135738fd1498Szrj   if (onlyjump_p (i1)
135838fd1498Szrj       || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
135938fd1498Szrj     {
136038fd1498Szrj       last1 = i1;
136138fd1498Szrj       i1 = PREV_INSN (i1);
136238fd1498Szrj     }
136338fd1498Szrj 
136438fd1498Szrj   i2 = BB_END (bb2);
136538fd1498Szrj   if (onlyjump_p (i2)
136638fd1498Szrj       || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
136738fd1498Szrj     {
136838fd1498Szrj       last2 = i2;
136938fd1498Szrj       /* Count everything except for unconditional jump as insn.
137038fd1498Szrj 	 Don't count any jumps if dir_p is NULL.  */
137138fd1498Szrj       if (!simplejump_p (i2) && !returnjump_p (i2) && last1 && dir_p)
137238fd1498Szrj 	ninsns++;
137338fd1498Szrj       i2 = PREV_INSN (i2);
137438fd1498Szrj     }
137538fd1498Szrj 
137638fd1498Szrj   while (true)
137738fd1498Szrj     {
137838fd1498Szrj       /* In the following example, we can replace all jumps to C by jumps to A.
137938fd1498Szrj 
138038fd1498Szrj          This removes 4 duplicate insns.
138138fd1498Szrj          [bb A] insn1            [bb C] insn1
138238fd1498Szrj                 insn2                   insn2
138338fd1498Szrj          [bb B] insn3                   insn3
138438fd1498Szrj                 insn4                   insn4
138538fd1498Szrj                 jump_insn               jump_insn
138638fd1498Szrj 
138738fd1498Szrj          We could also replace all jumps to A by jumps to C, but that leaves B
138838fd1498Szrj          alive, and removes only 2 duplicate insns.  In a subsequent crossjump
138938fd1498Szrj          step, all jumps to B would be replaced with jumps to the middle of C,
139038fd1498Szrj          achieving the same result with more effort.
139138fd1498Szrj          So we allow only the first possibility, which means that we don't allow
139238fd1498Szrj          fallthru in the block that's being replaced.  */
139338fd1498Szrj 
139438fd1498Szrj       follow_fallthru = dir_p && dir != dir_forward;
139538fd1498Szrj       walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
139638fd1498Szrj       if (did_fallthru)
139738fd1498Szrj         dir = dir_backward;
139838fd1498Szrj 
139938fd1498Szrj       follow_fallthru = dir_p && dir != dir_backward;
140038fd1498Szrj       walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
140138fd1498Szrj       if (did_fallthru)
140238fd1498Szrj         dir = dir_forward;
140338fd1498Szrj 
140438fd1498Szrj       if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
140538fd1498Szrj 	break;
140638fd1498Szrj 
140738fd1498Szrj       /* Do not turn crossing edge to non-crossing or vice versa after
140838fd1498Szrj 	 reload. */
140938fd1498Szrj       if (BB_PARTITION (BLOCK_FOR_INSN (i1))
141038fd1498Szrj 	  != BB_PARTITION (BLOCK_FOR_INSN (i2))
141138fd1498Szrj 	  && reload_completed)
141238fd1498Szrj 	break;
141338fd1498Szrj 
141438fd1498Szrj       dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
141538fd1498Szrj       if (dir == dir_none || (!dir_p && dir != dir_both))
141638fd1498Szrj 	break;
141738fd1498Szrj 
141838fd1498Szrj       merge_memattrs (i1, i2);
141938fd1498Szrj 
142038fd1498Szrj       /* Don't begin a cross-jump with a NOTE insn.  */
142138fd1498Szrj       if (INSN_P (i1))
142238fd1498Szrj 	{
142338fd1498Szrj 	  merge_notes (i1, i2);
142438fd1498Szrj 
142538fd1498Szrj 	  afterlast1 = last1, afterlast2 = last2;
142638fd1498Szrj 	  last1 = i1, last2 = i2;
142738fd1498Szrj 	  afterlast_dir = last_dir;
142838fd1498Szrj 	  last_dir = dir;
142938fd1498Szrj 	  if (active_insn_p (i1))
143038fd1498Szrj 	    ninsns++;
143138fd1498Szrj 	}
143238fd1498Szrj 
143338fd1498Szrj       i1 = PREV_INSN (i1);
143438fd1498Szrj       i2 = PREV_INSN (i2);
143538fd1498Szrj     }
143638fd1498Szrj 
143738fd1498Szrj   /* Don't allow the insn after a compare to be shared by
143838fd1498Szrj      cross-jumping unless the compare is also shared.  */
143938fd1498Szrj   if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
144038fd1498Szrj       && ! sets_cc0_p (last1))
144138fd1498Szrj     last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;
144238fd1498Szrj 
144338fd1498Szrj   /* Include preceding notes and labels in the cross-jump.  One,
144438fd1498Szrj      this may bring us to the head of the blocks as requested above.
144538fd1498Szrj      Two, it keeps line number notes as matched as may be.  */
144638fd1498Szrj   if (ninsns)
144738fd1498Szrj     {
144838fd1498Szrj       bb1 = BLOCK_FOR_INSN (last1);
144938fd1498Szrj       while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
145038fd1498Szrj 	last1 = PREV_INSN (last1);
145138fd1498Szrj 
145238fd1498Szrj       if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
145338fd1498Szrj 	last1 = PREV_INSN (last1);
145438fd1498Szrj 
145538fd1498Szrj       bb2 = BLOCK_FOR_INSN (last2);
145638fd1498Szrj       while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
145738fd1498Szrj 	last2 = PREV_INSN (last2);
145838fd1498Szrj 
145938fd1498Szrj       if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
146038fd1498Szrj 	last2 = PREV_INSN (last2);
146138fd1498Szrj 
146238fd1498Szrj       *f1 = last1;
146338fd1498Szrj       *f2 = last2;
146438fd1498Szrj     }
146538fd1498Szrj 
146638fd1498Szrj   if (dir_p)
146738fd1498Szrj     *dir_p = last_dir;
146838fd1498Szrj   return ninsns;
146938fd1498Szrj }
147038fd1498Szrj 
/* Like flow_find_cross_jump, except start looking for a matching sequence from
   the head of the two blocks.  Do not include jumps at the end.
   If STOP_AFTER is nonzero, stop after finding that many matching
   instructions.  If STOP_AFTER is zero, count all INSN_P insns, if it is
   non-zero, only count active insns.  If any insns matched, *F1 and *F2
   are set to the last matched insn of BB1 resp. BB2; the return value is
   the number of insns counted as described above.  */

int
flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx_insn **f1,
				  rtx_insn **f2, int stop_after)
{
  rtx_insn *i1, *i2, *last1, *last2, *beforelast1, *beforelast2;
  int ninsns = 0;
  edge e;
  edge_iterator ei;
  int nehedges1 = 0, nehedges2 = 0;

  /* Count the EH successor edges of both blocks up front; the sanity
     check inside the loop below uses these counts to refuse matching
     insns with different effects on exception handling.  */
  FOR_EACH_EDGE (e, ei, bb1->succs)
    if (e->flags & EDGE_EH)
      nehedges1++;
  FOR_EACH_EDGE (e, ei, bb2->succs)
    if (e->flags & EDGE_EH)
      nehedges2++;

  i1 = BB_HEAD (bb1);
  i2 = BB_HEAD (bb2);
  /* LAST1/LAST2 track the most recently matched pair of insns;
     BEFORELAST1/BEFORELAST2 the pair matched just before them, kept so
     that a trailing cc0 setter can be backed out after the loop.  */
  last1 = beforelast1 = last2 = beforelast2 = NULL;

  while (true)
    {
      /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG.  */
      while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
	{
	  if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
	    break;
	  i1 = NEXT_INSN (i1);
	}

      while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
	{
	  if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
	    break;
	  i2 = NEXT_INSN (i2);
	}

      /* Stop if either block is exhausted without reaching a comparable
	 insn.  */
      if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
	  || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
	break;

      /* A NOTE_INSN_EPILOGUE_BEG (the only note that survives the scans
	 above) or a jump ends the matched sequence; jumps at the end are
	 deliberately excluded (see the function comment).  */
      if (NOTE_P (i1) || NOTE_P (i2)
	  || JUMP_P (i1) || JUMP_P (i2))
	break;

      /* A sanity check to make sure we're not merging insns with different
	 effects on EH.  If only one of them ends a basic block, it shouldn't
	 have an EH edge; if both end a basic block, there should be the same
	 number of EH edges.  */
      if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
	   && nehedges1 > 0)
	  || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
	      && nehedges2 > 0)
	  || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
	      && nehedges1 != nehedges2))
	break;

      if (old_insns_match_p (0, i1, i2) != dir_both)
	break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
	{
	  merge_notes (i1, i2);

	  beforelast1 = last1, beforelast2 = last2;
	  last1 = i1, last2 = i2;
	  /* With a nonzero STOP_AFTER only active insns count towards the
	     limit; otherwise every INSN_P insn does.  */
	  if (!stop_after || active_insn_p (i1))
	    ninsns++;
	}

      if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
	  || (stop_after > 0 && ninsns == stop_after))
	break;

      i1 = NEXT_INSN (i1);
      i2 = NEXT_INSN (i2);
    }

  /* Don't allow a compare to be shared by cross-jumping unless the insn
     after the compare is also shared.  */
  if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
      && sets_cc0_p (last1))
    last1 = beforelast1, last2 = beforelast2, ninsns--;

  if (ninsns)
    {
      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}
157338fd1498Szrj 
/* Return true iff outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before end of the basic block, the semantic remains unchanged.
   MODE is passed through unchanged to old_insns_match_p wherever the
   block-ending instructions themselves have to be compared.

   We may assume that there exists one edge with a common destination.  */

static bool
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;
  edge_iterator ei;

  /* If we performed shrink-wrapping, edges to the exit block can
     only be distinguished for JUMP_INSNs.  The two paths may differ in
     whether they went through the prologue.  Sibcalls are fine, we know
     that we either didn't need or inserted an epilogue before them.  */
  if (crtl->shrink_wrapped
      && single_succ_p (bb1)
      && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun)
      && (!JUMP_P (BB_END (bb1))
	  /* Punt if the only successor is a fake edge to exit, the jump
	     must be some weird one.  */
	  || (single_succ_edge (bb1)->flags & EDGE_FAKE) != 0)
      && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
    return false;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (single_succ_p (bb1)
      && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
    return (single_succ_p (bb2)
	    && (single_succ_edge (bb2)->flags
		& (EDGE_COMPLEX | EDGE_FAKE)) == 0
	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (EDGE_COUNT (bb1->succs) == 2
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (EDGE_COUNT (bb2->succs) != 2
	  || !any_condjump_p (BB_END (bb2))
	  || !onlyjump_p (BB_END (bb2)))
	return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
	 should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
	f1 = single_succ_edge (f1->dest);

      if (FORWARDER_BLOCK_P (f2->dest))
	f2 = single_succ_edge (f2->dest);

      /* To simplify use of this function, return false if there are
	 unneeded forwarder blocks.  These will get eliminated later
	 during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
	  || FORWARDER_BLOCK_P (f2->dest)
	  || FORWARDER_BLOCK_P (b1->dest)
	  || FORWARDER_BLOCK_P (b2->dest))
	return false;

      /* The destinations must pair up, either directly or with branch
	 and fallthru roles swapped (REVERSE).  */
      if (f1->dest == f2->dest && b1->dest == b2->dest)
	reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
	reverse = true;
      else
	return false;

      /* If one jump branches on the condition being true and the other
	 on it being false (pc_rtx in the opposite IF_THEN_ELSE arm), the
	 sense of the comparison flips once more.  */
      set1 = pc_set (BB_END (bb1));
      set2 = pc_set (BB_END (bb2));
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
	reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
	code2 = reversed_comparison_code (cond2, BB_END (bb2));
      else
	code2 = GET_CODE (cond2);

      /* reversed_comparison_code returns UNKNOWN when the comparison
	 cannot be safely reversed.  */
      if (code2 == UNKNOWN)
	return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
	       || (code1 == swap_condition (code2)
		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
					      XEXP (cond2, 0))
		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
					      XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
	 we will only have one branch prediction bit to work with.  Thus
	 we require the existing branches to have probabilities that are
	 roughly similar.  */
      if (match
	  && optimize_bb_for_speed_p (bb1)
	  && optimize_bb_for_speed_p (bb2))
	{
	  profile_probability prob2;

	  if (b1->dest == b2->dest)
	    prob2 = b2->probability;
	  else
	    /* Do not use f2 probability as f2 may be forwarded.  */
	    prob2 = b2->probability.invert ();

	  /* Fail if the difference in probabilities is greater than 50%.
	     This rules out two well-predicted branches with opposite
	     outcomes.  */
	  if (b1->probability.differs_lot_from_p (prob2))
	    {
	      if (dump_file)
		{
		  fprintf (dump_file,
			   "Outcomes of branch in bb %i and %i differ too"
			   " much (", bb1->index, bb2->index);
		  b1->probability.dump (dump_file);
		  prob2.dump (dump_file);
		  fprintf (dump_file, ")\n");
		}
	      return false;
	    }
	}

      if (dump_file && match)
	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
		 bb1->index, bb2->index);

      return match;
    }

  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

  /* Check whether there are tablejumps in the end of BB1 and BB2.
     Return true if they are identical.  */
    {
      rtx_insn *label1, *label2;
      rtx_jump_table_data *table1, *table2;

      if (tablejump_p (BB_END (bb1), &label1, &table1)
	  && tablejump_p (BB_END (bb2), &label2, &table2)
	  && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
	{
	  /* The labels should never be the same rtx.  If they really are same
	     the jump tables are same too. So disable crossjumping of blocks BB1
	     and BB2 because when deleting the common insns in the end of BB1
	     by delete_basic_block () the jump table would be deleted too.  */
	  /* If LABEL2 is referenced in BB1->END do not do anything
	     because we would lose information when replacing
	     LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
	  if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
	    {
	      /* Set IDENTICAL to true when the tables are identical.  */
	      bool identical = false;
	      rtx p1, p2;

	      p1 = PATTERN (table1);
	      p2 = PATTERN (table2);
	      if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
		{
		  identical = true;
		}
	      else if (GET_CODE (p1) == ADDR_DIFF_VEC
		       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
		       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
		       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
		{
		  /* For ADDR_DIFF_VECs every element must compare equal
		     as well.  */
		  int i;

		  identical = true;
		  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
		    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
		      identical = false;
		}

	      if (identical)
		{
		  bool match;

		  /* Temporarily replace references to LABEL1 with LABEL2
		     in BB1->END so that we could compare the instructions.  */
		  replace_label_in_insn (BB_END (bb1), label1, label2, false);

		  match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
			   == dir_both);
		  if (dump_file && match)
		    fprintf (dump_file,
			     "Tablejumps in bb %i and %i match.\n",
			     bb1->index, bb2->index);

		  /* Set the original label in BB1->END because when deleting
		     a block whose end is a tablejump, the tablejump referenced
		     from the instruction is deleted too.  */
		  replace_label_in_insn (BB_END (bb1), label2, label1, false);

		  return match;
		}
	    }
	  return false;
	}
    }

  /* Find the last non-debug non-note instruction in each bb, except
     stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p
     handles that case specially. old_insns_match_p does not handle
     other types of instruction notes.  */
  rtx_insn *last1 = BB_END (bb1);
  rtx_insn *last2 = BB_END (bb2);
  while (!NOTE_INSN_BASIC_BLOCK_P (last1) &&
         (DEBUG_INSN_P (last1) || NOTE_P (last1)))
    last1 = PREV_INSN (last1);
  while (!NOTE_INSN_BASIC_BLOCK_P (last2) &&
         (DEBUG_INSN_P (last2) || NOTE_P (last2)))
    last2 = PREV_INSN (last2);
  gcc_assert (last1 && last2);

  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.  */
  if (old_insns_match_p (mode, last1, last2) != dir_both)
    return false;

  /* Search the outgoing edges, ensure that the counts do match, find possible
     fallthru and exception handling edges since these needs more
     validation.  */
  if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
    return false;

  bool nonfakeedges = false;
  FOR_EACH_EDGE (e1, ei, bb1->succs)
    {
      /* Edge counts are equal, so pair the edges up by index.  */
      e2 = EDGE_SUCC (bb2, ei.index);

      if ((e1->flags & EDGE_FAKE) == 0)
	nonfakeedges = true;

      if (e1->flags & EDGE_EH)
	nehedges1++;

      if (e2->flags & EDGE_EH)
	nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
	fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
	fallthru2 = e2;
    }

  /* If number of edges of various types does not match, fail.  */
  if (nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors
     and the last real insn doesn't have REG_ARGS_SIZE note, don't
     attempt to optimize, as the two basic blocks might have different
     REG_ARGS_SIZE depths.  For noreturn calls and unconditional
     traps there should be REG_ARG_SIZE notes, they could be missing
     for __builtin_unreachable () uses though.  */
  if (!nonfakeedges
      && !ACCUMULATE_OUTGOING_ARGS
      && (!INSN_P (last1)
          || !find_reg_note (last1, REG_ARGS_SIZE, NULL)))
    return false;

  /* fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
			? single_succ (fallthru1->dest): fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
			? single_succ (fallthru2->dest): fallthru2->dest);

      if (d1 != d2)
	return false;
    }

  /* Ensure the same EH region.  */
  {
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);

    if (!n1 && n2)
      return false;

    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
      return false;
  }

  /* The same checks as in try_crossjump_to_edge. It is required for RTL
     version of sequence abstraction.  */
  FOR_EACH_EDGE (e1, ei, bb2->succs)
    {
      edge e2;
      edge_iterator ei;
      basic_block d1 = e1->dest;

      if (FORWARDER_BLOCK_P (d1))
        d1 = EDGE_SUCC (d1, 0)->dest;

      /* Every (possibly forwarded) successor of BB2 must also appear as
	 a (possibly forwarded) successor of BB1.  */
      FOR_EACH_EDGE (e2, ei, bb1->succs)
        {
          basic_block d2 = e2->dest;
          if (FORWARDER_BLOCK_P (d2))
            d2 = EDGE_SUCC (d2, 0)->dest;
          if (d1 == d2)
            break;
        }

      /* E2 is NULL iff the inner loop found no matching successor.  */
      if (!e2)
        return false;
    }

  return true;
}
190938fd1498Szrj 
191038fd1498Szrj /* Returns true if BB basic block has a preserve label.  */
191138fd1498Szrj 
191238fd1498Szrj static bool
block_has_preserve_label(basic_block bb)191338fd1498Szrj block_has_preserve_label (basic_block bb)
191438fd1498Szrj {
191538fd1498Szrj   return (bb
191638fd1498Szrj           && block_label (bb)
191738fd1498Szrj           && LABEL_PRESERVE_P (block_label (bb)));
191838fd1498Szrj }
191938fd1498Szrj 
192038fd1498Szrj /* E1 and E2 are edges with the same destination block.  Search their
192138fd1498Szrj    predecessors for common code.  If found, redirect control flow from
192238fd1498Szrj    (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
192338fd1498Szrj    or the other way around (dir_backward).  DIR specifies the allowed
192438fd1498Szrj    replacement direction.  */
192538fd1498Szrj 
192638fd1498Szrj static bool
try_crossjump_to_edge(int mode,edge e1,edge e2,enum replace_direction dir)192738fd1498Szrj try_crossjump_to_edge (int mode, edge e1, edge e2,
192838fd1498Szrj                        enum replace_direction dir)
192938fd1498Szrj {
193038fd1498Szrj   int nmatch;
193138fd1498Szrj   basic_block src1 = e1->src, src2 = e2->src;
193238fd1498Szrj   basic_block redirect_to, redirect_from, to_remove;
193338fd1498Szrj   basic_block osrc1, osrc2, redirect_edges_to, tmp;
193438fd1498Szrj   rtx_insn *newpos1, *newpos2;
193538fd1498Szrj   edge s;
193638fd1498Szrj   edge_iterator ei;
193738fd1498Szrj 
193838fd1498Szrj   newpos1 = newpos2 = NULL;
193938fd1498Szrj 
194038fd1498Szrj   /* Search backward through forwarder blocks.  We don't need to worry
194138fd1498Szrj      about multiple entry or chained forwarders, as they will be optimized
194238fd1498Szrj      away.  We do this to look past the unconditional jump following a
194338fd1498Szrj      conditional jump that is required due to the current CFG shape.  */
194438fd1498Szrj   if (single_pred_p (src1)
194538fd1498Szrj       && FORWARDER_BLOCK_P (src1))
194638fd1498Szrj     e1 = single_pred_edge (src1), src1 = e1->src;
194738fd1498Szrj 
194838fd1498Szrj   if (single_pred_p (src2)
194938fd1498Szrj       && FORWARDER_BLOCK_P (src2))
195038fd1498Szrj     e2 = single_pred_edge (src2), src2 = e2->src;
195138fd1498Szrj 
195238fd1498Szrj   /* Nothing to do if we reach ENTRY, or a common source block.  */
195338fd1498Szrj   if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun) || src2
195438fd1498Szrj       == ENTRY_BLOCK_PTR_FOR_FN (cfun))
195538fd1498Szrj     return false;
195638fd1498Szrj   if (src1 == src2)
195738fd1498Szrj     return false;
195838fd1498Szrj 
195938fd1498Szrj   /* Seeing more than 1 forwarder blocks would confuse us later...  */
196038fd1498Szrj   if (FORWARDER_BLOCK_P (e1->dest)
196138fd1498Szrj       && FORWARDER_BLOCK_P (single_succ (e1->dest)))
196238fd1498Szrj     return false;
196338fd1498Szrj 
196438fd1498Szrj   if (FORWARDER_BLOCK_P (e2->dest)
196538fd1498Szrj       && FORWARDER_BLOCK_P (single_succ (e2->dest)))
196638fd1498Szrj     return false;
196738fd1498Szrj 
196838fd1498Szrj   /* Likewise with dead code (possibly newly created by the other optimizations
196938fd1498Szrj      of cfg_cleanup).  */
197038fd1498Szrj   if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
197138fd1498Szrj     return false;
197238fd1498Szrj 
197338fd1498Szrj   /* Do not turn crossing edge to non-crossing or vice versa after reload.  */
197438fd1498Szrj   if (BB_PARTITION (src1) != BB_PARTITION (src2)
197538fd1498Szrj       && reload_completed)
197638fd1498Szrj     return false;
197738fd1498Szrj 
197838fd1498Szrj   /* Look for the common insn sequence, part the first ...  */
197938fd1498Szrj   if (!outgoing_edges_match (mode, src1, src2))
198038fd1498Szrj     return false;
198138fd1498Szrj 
198238fd1498Szrj   /* ... and part the second.  */
198338fd1498Szrj   nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);
198438fd1498Szrj 
198538fd1498Szrj   osrc1 = src1;
198638fd1498Szrj   osrc2 = src2;
198738fd1498Szrj   if (newpos1 != NULL_RTX)
198838fd1498Szrj     src1 = BLOCK_FOR_INSN (newpos1);
198938fd1498Szrj   if (newpos2 != NULL_RTX)
199038fd1498Szrj     src2 = BLOCK_FOR_INSN (newpos2);
199138fd1498Szrj 
199238fd1498Szrj   /* Check that SRC1 and SRC2 have preds again.  They may have changed
199338fd1498Szrj      above due to the call to flow_find_cross_jump.  */
199438fd1498Szrj   if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
199538fd1498Szrj     return false;
199638fd1498Szrj 
199738fd1498Szrj   if (dir == dir_backward)
199838fd1498Szrj     {
199938fd1498Szrj       std::swap (osrc1, osrc2);
200038fd1498Szrj       std::swap (src1, src2);
200138fd1498Szrj       std::swap (e1, e2);
200238fd1498Szrj       std::swap (newpos1, newpos2);
200338fd1498Szrj     }
200438fd1498Szrj 
200538fd1498Szrj   /* Don't proceed with the crossjump unless we found a sufficient number
200638fd1498Szrj      of matching instructions or the 'from' block was totally matched
200738fd1498Szrj      (such that its predecessors will hopefully be redirected and the
200838fd1498Szrj      block removed).  */
200938fd1498Szrj   if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
201038fd1498Szrj       && (newpos1 != BB_HEAD (src1)))
201138fd1498Szrj     return false;
201238fd1498Szrj 
201338fd1498Szrj   /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
201438fd1498Szrj   if (block_has_preserve_label (e1->dest)
201538fd1498Szrj       && (e1->flags & EDGE_ABNORMAL))
201638fd1498Szrj     return false;
201738fd1498Szrj 
201838fd1498Szrj   /* Here we know that the insns in the end of SRC1 which are common with SRC2
201938fd1498Szrj      will be deleted.
202038fd1498Szrj      If we have tablejumps in the end of SRC1 and SRC2
202138fd1498Szrj      they have been already compared for equivalence in outgoing_edges_match ()
202238fd1498Szrj      so replace the references to TABLE1 by references to TABLE2.  */
202338fd1498Szrj   {
202438fd1498Szrj       rtx_insn *label1, *label2;
202538fd1498Szrj       rtx_jump_table_data *table1, *table2;
202638fd1498Szrj 
202738fd1498Szrj       if (tablejump_p (BB_END (osrc1), &label1, &table1)
202838fd1498Szrj 	  && tablejump_p (BB_END (osrc2), &label2, &table2)
202938fd1498Szrj 	  && label1 != label2)
203038fd1498Szrj 	{
203138fd1498Szrj 	  rtx_insn *insn;
203238fd1498Szrj 
203338fd1498Szrj 	  /* Replace references to LABEL1 with LABEL2.  */
203438fd1498Szrj 	  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
203538fd1498Szrj 	    {
203638fd1498Szrj 	      /* Do not replace the label in SRC1->END because when deleting
203738fd1498Szrj 		 a block whose end is a tablejump, the tablejump referenced
203838fd1498Szrj 		 from the instruction is deleted too.  */
203938fd1498Szrj 	      if (insn != BB_END (osrc1))
204038fd1498Szrj 		replace_label_in_insn (insn, label1, label2, true);
204138fd1498Szrj 	    }
204238fd1498Szrj 	}
204338fd1498Szrj   }
204438fd1498Szrj 
204538fd1498Szrj   /* Avoid splitting if possible.  We must always split when SRC2 has
204638fd1498Szrj      EH predecessor edges, or we may end up with basic blocks with both
204738fd1498Szrj      normal and EH predecessor edges.  */
204838fd1498Szrj   if (newpos2 == BB_HEAD (src2)
204938fd1498Szrj       && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
205038fd1498Szrj     redirect_to = src2;
205138fd1498Szrj   else
205238fd1498Szrj     {
205338fd1498Szrj       if (newpos2 == BB_HEAD (src2))
205438fd1498Szrj 	{
205538fd1498Szrj 	  /* Skip possible basic block header.  */
205638fd1498Szrj 	  if (LABEL_P (newpos2))
205738fd1498Szrj 	    newpos2 = NEXT_INSN (newpos2);
205838fd1498Szrj 	  while (DEBUG_INSN_P (newpos2))
205938fd1498Szrj 	    newpos2 = NEXT_INSN (newpos2);
206038fd1498Szrj 	  if (NOTE_P (newpos2))
206138fd1498Szrj 	    newpos2 = NEXT_INSN (newpos2);
206238fd1498Szrj 	  while (DEBUG_INSN_P (newpos2))
206338fd1498Szrj 	    newpos2 = NEXT_INSN (newpos2);
206438fd1498Szrj 	}
206538fd1498Szrj 
206638fd1498Szrj       if (dump_file)
206738fd1498Szrj 	fprintf (dump_file, "Splitting bb %i before %i insns\n",
206838fd1498Szrj 		 src2->index, nmatch);
206938fd1498Szrj       redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
207038fd1498Szrj     }
207138fd1498Szrj 
207238fd1498Szrj   if (dump_file)
207338fd1498Szrj     fprintf (dump_file,
207438fd1498Szrj 	     "Cross jumping from bb %i to bb %i; %i common insns\n",
207538fd1498Szrj 	     src1->index, src2->index, nmatch);
207638fd1498Szrj 
207738fd1498Szrj   /* We may have some registers visible through the block.  */
207838fd1498Szrj   df_set_bb_dirty (redirect_to);
207938fd1498Szrj 
208038fd1498Szrj   if (osrc2 == src2)
208138fd1498Szrj     redirect_edges_to = redirect_to;
208238fd1498Szrj   else
208338fd1498Szrj     redirect_edges_to = osrc2;
208438fd1498Szrj 
208538fd1498Szrj   /* Recompute the counts of destinations of outgoing edges.  */
208638fd1498Szrj   FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
208738fd1498Szrj     {
208838fd1498Szrj       edge s2;
208938fd1498Szrj       edge_iterator ei;
209038fd1498Szrj       basic_block d = s->dest;
209138fd1498Szrj 
209238fd1498Szrj       if (FORWARDER_BLOCK_P (d))
209338fd1498Szrj 	d = single_succ (d);
209438fd1498Szrj 
209538fd1498Szrj       FOR_EACH_EDGE (s2, ei, src1->succs)
209638fd1498Szrj 	{
209738fd1498Szrj 	  basic_block d2 = s2->dest;
209838fd1498Szrj 	  if (FORWARDER_BLOCK_P (d2))
209938fd1498Szrj 	    d2 = single_succ (d2);
210038fd1498Szrj 	  if (d == d2)
210138fd1498Szrj 	    break;
210238fd1498Szrj 	}
210338fd1498Szrj 
210438fd1498Szrj       /* Take care to update possible forwarder blocks.  We verified
210538fd1498Szrj 	 that there is no more than one in the chain, so we can't run
210638fd1498Szrj 	 into infinite loop.  */
210738fd1498Szrj       if (FORWARDER_BLOCK_P (s->dest))
210838fd1498Szrj 	s->dest->count += s->count ();
210938fd1498Szrj 
211038fd1498Szrj       if (FORWARDER_BLOCK_P (s2->dest))
211138fd1498Szrj 	s2->dest->count -= s->count ();
211238fd1498Szrj 
211338fd1498Szrj       s->probability = s->probability.combine_with_count
211438fd1498Szrj 			  (redirect_edges_to->count,
211538fd1498Szrj 			   s2->probability, src1->count);
211638fd1498Szrj     }
211738fd1498Szrj 
211838fd1498Szrj   /* Adjust count for the block.  An earlier jump
211938fd1498Szrj      threading pass may have left the profile in an inconsistent
212038fd1498Szrj      state (see update_bb_profile_for_threading) so we must be
212138fd1498Szrj      prepared for overflows.  */
212238fd1498Szrj   tmp = redirect_to;
212338fd1498Szrj   do
212438fd1498Szrj     {
212538fd1498Szrj       tmp->count += src1->count;
212638fd1498Szrj       if (tmp == redirect_edges_to)
212738fd1498Szrj         break;
212838fd1498Szrj       tmp = find_fallthru_edge (tmp->succs)->dest;
212938fd1498Szrj     }
213038fd1498Szrj   while (true);
213138fd1498Szrj   update_br_prob_note (redirect_edges_to);
213238fd1498Szrj 
213338fd1498Szrj   /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */
213438fd1498Szrj 
213538fd1498Szrj   /* Skip possible basic block header.  */
213638fd1498Szrj   if (LABEL_P (newpos1))
213738fd1498Szrj     newpos1 = NEXT_INSN (newpos1);
213838fd1498Szrj 
213938fd1498Szrj   while (DEBUG_INSN_P (newpos1))
214038fd1498Szrj     newpos1 = NEXT_INSN (newpos1);
214138fd1498Szrj 
214238fd1498Szrj   if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
214338fd1498Szrj     newpos1 = NEXT_INSN (newpos1);
214438fd1498Szrj 
214538fd1498Szrj   while (DEBUG_INSN_P (newpos1))
214638fd1498Szrj     newpos1 = NEXT_INSN (newpos1);
214738fd1498Szrj 
214838fd1498Szrj   redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
214938fd1498Szrj   to_remove = single_succ (redirect_from);
215038fd1498Szrj 
215138fd1498Szrj   redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
215238fd1498Szrj   delete_basic_block (to_remove);
215338fd1498Szrj 
215438fd1498Szrj   update_forwarder_flag (redirect_from);
215538fd1498Szrj   if (redirect_to != src2)
215638fd1498Szrj     update_forwarder_flag (src2);
215738fd1498Szrj 
215838fd1498Szrj   return true;
215938fd1498Szrj }
216038fd1498Szrj 
216138fd1498Szrj /* Search the predecessors of BB for common insn sequences.  When found,
216238fd1498Szrj    share code between them by redirecting control flow.  Return true if
216338fd1498Szrj    any changes made.  */
216438fd1498Szrj 
static bool
try_crossjump_bb (int mode, basic_block bb)
{
  edge e, e2, fallthru;
  bool changed;
  unsigned max, ix, ix2;

  /* Nothing to do if there is not at least two incoming edges.  */
  if (EDGE_COUNT (bb->preds) < 2)
    return false;

  /* Don't crossjump if this block ends in a computed jump,
     unless we are optimizing for size.
     NOTE(review): the guard below bails out precisely when
     optimize_bb_for_size_p (bb) is true, i.e. *when* optimizing for
     size, which reads as the opposite of the "unless" in the sentence
     above -- confirm which polarity is intended before touching it.  */
  if (optimize_bb_for_size_p (bb)
      && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && computed_jump_p (BB_END (bb)))
    return false;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
					BB_PARTITION (EDGE_PRED (bb, 1)->src)
      || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  fallthru = NULL;
  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);

  /* With more than MAX predecessors the pairwise comparisons below
     become quadratically expensive; give up.  */
  if (EDGE_COUNT (bb->preds) > max)
    return false;

  fallthru = find_fallthru_edge (bb->preds);

  changed = false;
  /* Note: no increment in the for header -- IX is advanced in the body,
     and reset to zero whenever a crossjump succeeds, because a success
     can reshuffle BB's predecessor vector under us.  */
  for (ix = 0; ix < EDGE_COUNT (bb->preds);)
    {
      e = EDGE_PRED (bb, ix);
      ix++;

      /* As noted above, first try with the fallthru predecessor (or, a
	 fallthru predecessor if we are in cfglayout mode).  */
      if (fallthru)
	{
	  /* Don't combine the fallthru edge into anything else.
	     If there is a match, we'll do it the other way around.  */
	  if (e == fallthru)
	    continue;
	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  BB_MODIFIED is maintained by df_set_bb_dirty;
	     on the very first pass every pair must still be tried.  */
	  if (!first_pass
	      && !((e->src->flags & BB_MODIFIED)
		   || (fallthru->src->flags & BB_MODIFIED)))
	    continue;

	  if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))
	    {
	      changed = true;
	      /* Restart the scan; the predecessor vector may have
		 changed.  */
	      ix = 0;
	      continue;
	    }
	}

      /* Non-obvious work limiting check: Recognize that we're going
	 to call try_crossjump_bb on every basic block.  So if we have
	 two blocks with lots of outgoing edges (a switch) and they
	 share lots of common destinations, then we would do the
	 cross-jump check once for each common destination.

	 Now, if the blocks actually are cross-jump candidates, then
	 all of their destinations will be shared.  Which means that
	 we only need check them for cross-jump candidacy once.  We
	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
	 choosing to do the check from the block for which the edge
	 in question is the first successor of A.  */
      if (EDGE_SUCC (e->src, 0) != e)
	continue;

      /* Inner scan: pair E with every other predecessor edge E2.  */
      for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
	{
	  e2 = EDGE_PRED (bb, ix2);

	  if (e2 == e)
	    continue;

	  /* We've already checked the fallthru edge above.  */
	  if (e2 == fallthru)
	    continue;

	  /* The "first successor" check above only prevents multiple
	     checks of crossjump(A,B).  In order to prevent redundant
	     checks of crossjump(B,A), require that A be the block
	     with the lowest index.  */
	  if (e->src->index > e2->src->index)
	    continue;

	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && !((e->src->flags & BB_MODIFIED)
		   || (e2->src->flags & BB_MODIFIED)))
	    continue;

	  /* Both e and e2 are not fallthru edges, so we can crossjump in either
	     direction.  */
	  if (try_crossjump_to_edge (mode, e, e2, dir_both))
	    {
	      changed = true;
	      /* Restart the outer scan from the first predecessor.  */
	      ix = 0;
	      break;
	    }
	}
    }

  /* Record in the file-wide flag that at least one crossjump happened,
     so later cleanup iterations know to re-run dependent phases.  */
  if (changed)
    crossjumps_occurred = true;

  return changed;
}
229438fd1498Szrj 
229538fd1498Szrj /* Search the successors of BB for common insn sequences.  When found,
229638fd1498Szrj    share code between them by moving it across the basic block
229738fd1498Szrj    boundary.  Return true if any changes made.  */
229838fd1498Szrj 
static bool
try_head_merge_bb (basic_block bb)
{
  basic_block final_dest_bb = NULL;
  int max_match = INT_MAX;
  edge e0;
  rtx_insn **headptr, **currptr, **nextptr;
  bool changed, moveall;
  unsigned ix;
  rtx_insn *e0_last_head;
  rtx cond;
  rtx_insn *move_before;
  unsigned nedges = EDGE_COUNT (bb->succs);
  rtx_insn *jump = BB_END (bb);
  regset live, live_union;

  /* Nothing to do if there is not at least two outgoing edges.  */
  if (nedges < 2)
    return false;

  /* Don't crossjump if this block ends in a computed jump,
     unless we are optimizing for size.  */
  if (optimize_bb_for_size_p (bb)
      && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && computed_jump_p (BB_END (bb)))
    return false;

  /* Find the insertion point for the hoisted insns: normally just
     before the condition computation of the final jump.  */
  cond = get_condition (jump, &move_before, true, false);
  if (cond == NULL_RTX)
    {
      /* No usable condition.  On cc0 targets, if the jump reads cc0,
	 insert before the insn preceding the jump (presumably the cc0
	 setter) so the flags are not clobbered; otherwise insert
	 directly before the jump.  */
      if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
	move_before = prev_nonnote_nondebug_insn (jump);
      else
	move_before = jump;
    }

  /* Never move insns into the exit block.  */
  for (ix = 0; ix < nedges; ix++)
    if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
      return false;

  /* Validate each successor; also detect the one supported multi-pred
     shape (see comment below) and record FINAL_DEST_BB for it.  */
  for (ix = 0; ix < nedges; ix++)
    {
      edge e = EDGE_SUCC (bb, ix);
      basic_block other_bb = e->dest;

      if (df_get_bb_dirty (other_bb))
	{
	  /* Liveness info for OTHER_BB is stale; signal the caller to
	     re-run dataflow and retry instead of acting on bad data.  */
	  block_was_dirty = true;
	  return false;
	}

      if (e->flags & EDGE_ABNORMAL)
	return false;

      /* Normally, all destination blocks must only be reachable from this
	 block, i.e. they must have one incoming edge.

	 There is one special case we can handle, that of multiple consecutive
	 jumps where the first jumps to one of the targets of the second jump.
	 This happens frequently in switch statements for default labels.
	 The structure is as follows:
	 FINAL_DEST_BB
	 ....
	 if (cond) jump A;
	 fall through
	 BB
	 jump with targets A, B, C, D...
	 A
	 has two incoming edges, from FINAL_DEST_BB and BB

	 In this case, we can try to move the insns through BB and into
	 FINAL_DEST_BB.  */
      if (EDGE_COUNT (other_bb->preds) != 1)
	{
	  edge incoming_edge, incoming_bb_other_edge;
	  edge_iterator ei;

	  /* Only one such special-case destination is supported.  */
	  if (final_dest_bb != NULL
	      || EDGE_COUNT (other_bb->preds) != 2)
	    return false;

	  /* We must be able to move the insns across the whole block.  */
	  move_before = BB_HEAD (bb);
	  while (!NONDEBUG_INSN_P (move_before))
	    move_before = NEXT_INSN (move_before);

	  if (EDGE_COUNT (bb->preds) != 1)
	    return false;
	  incoming_edge = EDGE_PRED (bb, 0);
	  final_dest_bb = incoming_edge->src;
	  if (EDGE_COUNT (final_dest_bb->succs) != 2)
	    return false;
	  /* The other successor of FINAL_DEST_BB must be exactly
	     OTHER_BB for the diamond shape pictured above to hold.  */
	  FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
	    if (incoming_bb_other_edge != incoming_edge)
	      break;
	  if (incoming_bb_other_edge->dest != other_bb)
	    return false;
	}
    }

  e0 = EDGE_SUCC (bb, 0);
  e0_last_head = NULL;
  changed = false;

  /* MAX_MATCH becomes the length of the longest insn sequence that is
     common to the heads of *all* successors; E0_LAST_HEAD is the last
     insn of that sequence within E0->dest.  */
  for (ix = 1; ix < nedges; ix++)
    {
      edge e = EDGE_SUCC (bb, ix);
      rtx_insn *e0_last, *e_last;
      int nmatch;

      nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
						 &e0_last, &e_last, 0);
      if (nmatch == 0)
	return false;

      if (nmatch < max_match)
	{
	  max_match = nmatch;
	  e0_last_head = e0_last;
	}
    }

  /* If we matched an entire block, we probably have to avoid moving the
     last insn.  */
  if (max_match > 0
      && e0_last_head == BB_END (e0->dest)
      && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
	  || control_flow_insn_p (e0_last_head)))
    {
      max_match--;
      if (max_match == 0)
	return false;
      e0_last_head = prev_real_nondebug_insn (e0_last_head);
    }

  if (max_match == 0)
    return false;

  /* We must find a union of the live registers at each of the end points.  */
  live = BITMAP_ALLOC (NULL);
  live_union = BITMAP_ALLOC (NULL);

  /* Per-successor cursors into the common head sequence:
     HEADPTR = first nondebug insn, CURRPTR = current end of the range
     being moved, NEXTPTR = resume point after a partial move.  */
  currptr = XNEWVEC (rtx_insn *, nedges);
  headptr = XNEWVEC (rtx_insn *, nedges);
  nextptr = XNEWVEC (rtx_insn *, nedges);

  for (ix = 0; ix < nedges; ix++)
    {
      int j;
      basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
      rtx_insn *head = BB_HEAD (merge_bb);

      while (!NONDEBUG_INSN_P (head))
	head = NEXT_INSN (head);
      headptr[ix] = head;
      currptr[ix] = head;

      /* Compute the end point and live information  */
      /* Advance HEAD to the MAX_MATCH'th nondebug insn of the block.  */
      for (j = 1; j < max_match; j++)
	do
	  head = NEXT_INSN (head);
	while (!NONDEBUG_INSN_P (head));
      simulate_backwards_to_point (merge_bb, live, head);
      IOR_REG_SET (live_union, live);
    }

  /* If we're moving across two blocks, verify the validity of the
     first move, then adjust the target and let the loop below deal
     with the final move.  */
  if (final_dest_bb != NULL)
    {
      rtx_insn *move_upto;

      moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
				       jump, e0->dest, live_union,
				       NULL, &move_upto);
      if (!moveall)
	{
	  if (move_upto == NULL_RTX)
	    goto out;

	  /* Shrink the range to the movable prefix, updating liveness
	     as each trailing insn is dropped.  */
	  while (e0_last_head != move_upto)
	    {
	      df_simulate_one_insn_backwards (e0->dest, e0_last_head,
					      live_union);
	      e0_last_head = PREV_INSN (e0_last_head);
	    }
	}
      if (e0_last_head == NULL_RTX)
	goto out;

      /* Retarget the insertion point into FINAL_DEST_BB, recomputing
	 the condition / cc0 handling as at function entry.  */
      jump = BB_END (final_dest_bb);
      cond = get_condition (jump, &move_before, true, false);
      if (cond == NULL_RTX)
	{
	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
	    move_before = prev_nonnote_nondebug_insn (jump);
	  else
	    move_before = jump;
	}
    }

  /* Main loop: repeatedly move the largest movable prefix of the
     common head sequence, until everything has been moved or no
     further progress is possible.  */
  do
    {
      rtx_insn *move_upto;
      moveall = can_move_insns_across (currptr[0], e0_last_head,
				       move_before, jump, e0->dest, live_union,
				       NULL, &move_upto);
      if (!moveall && move_upto == NULL_RTX)
	{
	  if (jump == move_before)
	    break;

	  /* Try again, using a different insertion point.  */
	  move_before = jump;

	  /* Don't try moving before a cc0 user, as that may invalidate
	     the cc0.  */
	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
	    break;

	  continue;
	}

      if (final_dest_bb && !moveall)
	/* We haven't checked whether a partial move would be OK for the first
	   move, so we have to fail this case.  */
	break;

      changed = true;
      /* Advance every successor's cursor in lockstep until cursor 0
	 reaches MOVE_UPTO; the matched sequences are insn-for-insn
	 parallel, so all cursors stay aligned.  */
      for (;;)
	{
	  if (currptr[0] == move_upto)
	    break;
	  for (ix = 0; ix < nedges; ix++)
	    {
	      rtx_insn *curr = currptr[ix];
	      do
		curr = NEXT_INSN (curr);
	      while (!NONDEBUG_INSN_P (curr));
	      currptr[ix] = curr;
	    }
	}

      /* If we can't currently move all of the identical insns, remember
	 each insn after the range that we'll merge.  */
      if (!moveall)
	for (ix = 0; ix < nedges; ix++)
	  {
	    rtx_insn *curr = currptr[ix];
	    do
	      curr = NEXT_INSN (curr);
	    while (!NONDEBUG_INSN_P (curr));
	    nextptr[ix] = curr;
	  }

      /* Hoist successor 0's copy of the range; delete the duplicate
	 copies from every other successor.  */
      reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
      df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
      if (final_dest_bb != NULL)
	df_set_bb_dirty (final_dest_bb);
      df_set_bb_dirty (bb);
      for (ix = 1; ix < nedges; ix++)
	{
	  df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
	  delete_insn_chain (headptr[ix], currptr[ix], false);
	}
      if (!moveall)
	{
	  if (jump == move_before)
	    break;

	  /* For the unmerged insns, try a different insertion point.  */
	  move_before = jump;

	  /* Don't try moving before a cc0 user, as that may invalidate
	     the cc0.  */
	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
	    break;

	  for (ix = 0; ix < nedges; ix++)
	    currptr[ix] = headptr[ix] = nextptr[ix];
	}
    }
  while (!moveall);

 out:
  /* NOTE(review): LIVE and LIVE_UNION (BITMAP_ALLOC (NULL) above) are
     not explicitly BITMAP_FREEd on any exit path here -- presumably
     they are reclaimed with the default bitmap obstack; confirm this
     is not a leak.  */
  free (currptr);
  free (headptr);
  free (nextptr);

  crossjumps_occurred |= changed;

  return changed;
}
259338fd1498Szrj 
259438fd1498Szrj /* Return true if BB contains just bb note, or bb note followed
259538fd1498Szrj    by only DEBUG_INSNs.  */
259638fd1498Szrj 
259738fd1498Szrj static bool
trivially_empty_bb_p(basic_block bb)259838fd1498Szrj trivially_empty_bb_p (basic_block bb)
259938fd1498Szrj {
260038fd1498Szrj   rtx_insn *insn = BB_END (bb);
260138fd1498Szrj 
260238fd1498Szrj   while (1)
260338fd1498Szrj     {
260438fd1498Szrj       if (insn == BB_HEAD (bb))
260538fd1498Szrj 	return true;
260638fd1498Szrj       if (!DEBUG_INSN_P (insn))
260738fd1498Szrj 	return false;
260838fd1498Szrj       insn = PREV_INSN (insn);
260938fd1498Szrj     }
261038fd1498Szrj }
261138fd1498Szrj 
261238fd1498Szrj /* Return true if BB contains just a return and possibly a USE of the
261338fd1498Szrj    return value.  Fill in *RET and *USE with the return and use insns
261438fd1498Szrj    if any found, otherwise NULL.  All CLOBBERs are ignored.  */
261538fd1498Szrj 
261638fd1498Szrj static bool
bb_is_just_return(basic_block bb,rtx_insn ** ret,rtx_insn ** use)261738fd1498Szrj bb_is_just_return (basic_block bb, rtx_insn **ret, rtx_insn **use)
261838fd1498Szrj {
261938fd1498Szrj   *ret = *use = NULL;
262038fd1498Szrj   rtx_insn *insn;
262138fd1498Szrj 
262238fd1498Szrj   if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
262338fd1498Szrj     return false;
262438fd1498Szrj 
262538fd1498Szrj   FOR_BB_INSNS (bb, insn)
262638fd1498Szrj     if (NONDEBUG_INSN_P (insn))
262738fd1498Szrj       {
262838fd1498Szrj 	rtx pat = PATTERN (insn);
262938fd1498Szrj 
263038fd1498Szrj 	if (!*ret && ANY_RETURN_P (pat))
263138fd1498Szrj 	  *ret = insn;
263238fd1498Szrj 	else if (!*ret && !*use && GET_CODE (pat) == USE
263338fd1498Szrj 	    && REG_P (XEXP (pat, 0))
263438fd1498Szrj 	    && REG_FUNCTION_VALUE_P (XEXP (pat, 0)))
263538fd1498Szrj 	  *use = insn;
263638fd1498Szrj 	else if (GET_CODE (pat) != CLOBBER)
263738fd1498Szrj 	  return false;
263838fd1498Szrj       }
263938fd1498Szrj 
264038fd1498Szrj   return !!*ret;
264138fd1498Szrj }
264238fd1498Szrj 
264338fd1498Szrj /* Do simple CFG optimizations - basic block merging, simplifying of jump
264438fd1498Szrj    instructions etc.  Return nonzero if changes were made.  */
264538fd1498Szrj 
264638fd1498Szrj static bool
try_optimize_cfg(int mode)264738fd1498Szrj try_optimize_cfg (int mode)
264838fd1498Szrj {
264938fd1498Szrj   bool changed_overall = false;
265038fd1498Szrj   bool changed;
265138fd1498Szrj   int iterations = 0;
265238fd1498Szrj   basic_block bb, b, next;
265338fd1498Szrj 
265438fd1498Szrj   if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
265538fd1498Szrj     clear_bb_flags ();
265638fd1498Szrj 
265738fd1498Szrj   crossjumps_occurred = false;
265838fd1498Szrj 
265938fd1498Szrj   FOR_EACH_BB_FN (bb, cfun)
266038fd1498Szrj     update_forwarder_flag (bb);
266138fd1498Szrj 
266238fd1498Szrj   if (! targetm.cannot_modify_jumps_p ())
266338fd1498Szrj     {
266438fd1498Szrj       first_pass = true;
266538fd1498Szrj       /* Attempt to merge blocks as made possible by edge removal.  If
266638fd1498Szrj 	 a block has only one successor, and the successor has only
266738fd1498Szrj 	 one predecessor, they may be combined.  */
266838fd1498Szrj       do
266938fd1498Szrj 	{
267038fd1498Szrj 	  block_was_dirty = false;
267138fd1498Szrj 	  changed = false;
267238fd1498Szrj 	  iterations++;
267338fd1498Szrj 
267438fd1498Szrj 	  if (dump_file)
267538fd1498Szrj 	    fprintf (dump_file,
267638fd1498Szrj 		     "\n\ntry_optimize_cfg iteration %i\n\n",
267738fd1498Szrj 		     iterations);
267838fd1498Szrj 
267938fd1498Szrj 	  for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
268038fd1498Szrj 	       != EXIT_BLOCK_PTR_FOR_FN (cfun);)
268138fd1498Szrj 	    {
268238fd1498Szrj 	      basic_block c;
268338fd1498Szrj 	      edge s;
268438fd1498Szrj 	      bool changed_here = false;
268538fd1498Szrj 
268638fd1498Szrj 	      /* Delete trivially dead basic blocks.  This is either
268738fd1498Szrj 		 blocks with no predecessors, or empty blocks with no
268838fd1498Szrj 		 successors.  However if the empty block with no
268938fd1498Szrj 		 successors is the successor of the ENTRY_BLOCK, it is
269038fd1498Szrj 		 kept.  This ensures that the ENTRY_BLOCK will have a
269138fd1498Szrj 		 successor which is a precondition for many RTL
269238fd1498Szrj 		 passes.  Empty blocks may result from expanding
269338fd1498Szrj 		 __builtin_unreachable ().  */
269438fd1498Szrj 	      if (EDGE_COUNT (b->preds) == 0
269538fd1498Szrj 		  || (EDGE_COUNT (b->succs) == 0
269638fd1498Szrj 		      && trivially_empty_bb_p (b)
269738fd1498Szrj 		      && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest
269838fd1498Szrj 		      != b))
269938fd1498Szrj 		{
270038fd1498Szrj 		  c = b->prev_bb;
270138fd1498Szrj 		  if (EDGE_COUNT (b->preds) > 0)
270238fd1498Szrj 		    {
270338fd1498Szrj 		      edge e;
270438fd1498Szrj 		      edge_iterator ei;
270538fd1498Szrj 
270638fd1498Szrj 		      if (current_ir_type () == IR_RTL_CFGLAYOUT)
270738fd1498Szrj 			{
270838fd1498Szrj 			  if (BB_FOOTER (b)
270938fd1498Szrj 			      && BARRIER_P (BB_FOOTER (b)))
271038fd1498Szrj 			    FOR_EACH_EDGE (e, ei, b->preds)
271138fd1498Szrj 			      if ((e->flags & EDGE_FALLTHRU)
271238fd1498Szrj 				  && BB_FOOTER (e->src) == NULL)
271338fd1498Szrj 				{
271438fd1498Szrj 				  if (BB_FOOTER (b))
271538fd1498Szrj 				    {
271638fd1498Szrj 				      BB_FOOTER (e->src) = BB_FOOTER (b);
271738fd1498Szrj 				      BB_FOOTER (b) = NULL;
271838fd1498Szrj 				    }
271938fd1498Szrj 				  else
272038fd1498Szrj 				    {
272138fd1498Szrj 				      start_sequence ();
272238fd1498Szrj 				      BB_FOOTER (e->src) = emit_barrier ();
272338fd1498Szrj 				      end_sequence ();
272438fd1498Szrj 				    }
272538fd1498Szrj 				}
272638fd1498Szrj 			}
272738fd1498Szrj 		      else
272838fd1498Szrj 			{
272938fd1498Szrj 			  rtx_insn *last = get_last_bb_insn (b);
273038fd1498Szrj 			  if (last && BARRIER_P (last))
273138fd1498Szrj 			    FOR_EACH_EDGE (e, ei, b->preds)
273238fd1498Szrj 			      if ((e->flags & EDGE_FALLTHRU))
273338fd1498Szrj 				emit_barrier_after (BB_END (e->src));
273438fd1498Szrj 			}
273538fd1498Szrj 		    }
273638fd1498Szrj 		  delete_basic_block (b);
273738fd1498Szrj 		  changed = true;
273838fd1498Szrj 		  /* Avoid trying to remove the exit block.  */
273938fd1498Szrj 		  b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c);
274038fd1498Szrj 		  continue;
274138fd1498Szrj 		}
274238fd1498Szrj 
274338fd1498Szrj 	      /* Remove code labels no longer used.  */
274438fd1498Szrj 	      if (single_pred_p (b)
274538fd1498Szrj 		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
274638fd1498Szrj 		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
274738fd1498Szrj 		  && LABEL_P (BB_HEAD (b))
274838fd1498Szrj 		  && !LABEL_PRESERVE_P (BB_HEAD (b))
274938fd1498Szrj 		  /* If the previous block ends with a branch to this
275038fd1498Szrj 		     block, we can't delete the label.  Normally this
275138fd1498Szrj 		     is a condjump that is yet to be simplified, but
275238fd1498Szrj 		     if CASE_DROPS_THRU, this can be a tablejump with
275338fd1498Szrj 		     some element going to the same place as the
275438fd1498Szrj 		     default (fallthru).  */
275538fd1498Szrj 		  && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun)
275638fd1498Szrj 		      || !JUMP_P (BB_END (single_pred (b)))
275738fd1498Szrj 		      || ! label_is_jump_target_p (BB_HEAD (b),
275838fd1498Szrj 						   BB_END (single_pred (b)))))
275938fd1498Szrj 		{
276038fd1498Szrj 		  delete_insn (BB_HEAD (b));
276138fd1498Szrj 		  if (dump_file)
276238fd1498Szrj 		    fprintf (dump_file, "Deleted label in block %i.\n",
276338fd1498Szrj 			     b->index);
276438fd1498Szrj 		}
276538fd1498Szrj 
276638fd1498Szrj 	      /* If we fall through an empty block, we can remove it.  */
276738fd1498Szrj 	      if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL))
276838fd1498Szrj 		  && single_pred_p (b)
276938fd1498Szrj 		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
277038fd1498Szrj 		  && !LABEL_P (BB_HEAD (b))
277138fd1498Szrj 		  && FORWARDER_BLOCK_P (b)
277238fd1498Szrj 		  /* Note that forwarder_block_p true ensures that
277338fd1498Szrj 		     there is a successor for this block.  */
277438fd1498Szrj 		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
277538fd1498Szrj 		  && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
277638fd1498Szrj 		{
277738fd1498Szrj 		  if (dump_file)
277838fd1498Szrj 		    fprintf (dump_file,
277938fd1498Szrj 			     "Deleting fallthru block %i.\n",
278038fd1498Szrj 			     b->index);
278138fd1498Szrj 
278238fd1498Szrj 		  c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
278338fd1498Szrj 		       ? b->next_bb : b->prev_bb);
278438fd1498Szrj 		  redirect_edge_succ_nodup (single_pred_edge (b),
278538fd1498Szrj 					    single_succ (b));
278638fd1498Szrj 		  delete_basic_block (b);
278738fd1498Szrj 		  changed = true;
278838fd1498Szrj 		  b = c;
278938fd1498Szrj 		  continue;
279038fd1498Szrj 		}
279138fd1498Szrj 
279238fd1498Szrj 	      /* Merge B with its single successor, if any.  */
279338fd1498Szrj 	      if (single_succ_p (b)
279438fd1498Szrj 		  && (s = single_succ_edge (b))
279538fd1498Szrj 		  && !(s->flags & EDGE_COMPLEX)
279638fd1498Szrj 		  && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun)
279738fd1498Szrj 		  && single_pred_p (c)
279838fd1498Szrj 		  && b != c)
279938fd1498Szrj 		{
280038fd1498Szrj 		  /* When not in cfg_layout mode use code aware of reordering
280138fd1498Szrj 		     INSN.  This code possibly creates new basic blocks so it
280238fd1498Szrj 		     does not fit merge_blocks interface and is kept here in
280338fd1498Szrj 		     hope that it will become useless once more of compiler
280438fd1498Szrj 		     is transformed to use cfg_layout mode.  */
280538fd1498Szrj 
280638fd1498Szrj 		  if ((mode & CLEANUP_CFGLAYOUT)
280738fd1498Szrj 		      && can_merge_blocks_p (b, c))
280838fd1498Szrj 		    {
280938fd1498Szrj 		      merge_blocks (b, c);
281038fd1498Szrj 		      update_forwarder_flag (b);
281138fd1498Szrj 		      changed_here = true;
281238fd1498Szrj 		    }
281338fd1498Szrj 		  else if (!(mode & CLEANUP_CFGLAYOUT)
281438fd1498Szrj 			   /* If the jump insn has side effects,
281538fd1498Szrj 			      we can't kill the edge.  */
281638fd1498Szrj 			   && (!JUMP_P (BB_END (b))
281738fd1498Szrj 			       || (reload_completed
281838fd1498Szrj 				   ? simplejump_p (BB_END (b))
281938fd1498Szrj 				   : (onlyjump_p (BB_END (b))
282038fd1498Szrj 				      && !tablejump_p (BB_END (b),
282138fd1498Szrj 						       NULL, NULL))))
282238fd1498Szrj 			   && (next = merge_blocks_move (s, b, c, mode)))
282338fd1498Szrj 		      {
282438fd1498Szrj 			b = next;
282538fd1498Szrj 			changed_here = true;
282638fd1498Szrj 		      }
282738fd1498Szrj 		}
282838fd1498Szrj 
282938fd1498Szrj 	      /* Try to change a branch to a return to just that return.  */
283038fd1498Szrj 	      rtx_insn *ret, *use;
283138fd1498Szrj 	      if (single_succ_p (b)
283238fd1498Szrj 		  && onlyjump_p (BB_END (b))
283338fd1498Szrj 		  && bb_is_just_return (single_succ (b), &ret, &use))
283438fd1498Szrj 		{
283538fd1498Szrj 		  if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
283638fd1498Szrj 				     PATTERN (ret), 0))
283738fd1498Szrj 		    {
283838fd1498Szrj 		      if (use)
283938fd1498Szrj 			emit_insn_before (copy_insn (PATTERN (use)),
284038fd1498Szrj 					  BB_END (b));
284138fd1498Szrj 		      if (dump_file)
284238fd1498Szrj 			fprintf (dump_file, "Changed jump %d->%d to return.\n",
284338fd1498Szrj 				 b->index, single_succ (b)->index);
284438fd1498Szrj 		      redirect_edge_succ (single_succ_edge (b),
284538fd1498Szrj 					  EXIT_BLOCK_PTR_FOR_FN (cfun));
284638fd1498Szrj 		      single_succ_edge (b)->flags &= ~EDGE_CROSSING;
284738fd1498Szrj 		      changed_here = true;
284838fd1498Szrj 		    }
284938fd1498Szrj 		}
285038fd1498Szrj 
285138fd1498Szrj 	      /* Try to change a conditional branch to a return to the
285238fd1498Szrj 		 respective conditional return.  */
285338fd1498Szrj 	      if (EDGE_COUNT (b->succs) == 2
285438fd1498Szrj 		  && any_condjump_p (BB_END (b))
285538fd1498Szrj 		  && bb_is_just_return (BRANCH_EDGE (b)->dest, &ret, &use))
285638fd1498Szrj 		{
285738fd1498Szrj 		  if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
285838fd1498Szrj 				     PATTERN (ret), 0))
285938fd1498Szrj 		    {
286038fd1498Szrj 		      if (use)
286138fd1498Szrj 			emit_insn_before (copy_insn (PATTERN (use)),
286238fd1498Szrj 					  BB_END (b));
286338fd1498Szrj 		      if (dump_file)
286438fd1498Szrj 			fprintf (dump_file, "Changed conditional jump %d->%d "
286538fd1498Szrj 				 "to conditional return.\n",
286638fd1498Szrj 				 b->index, BRANCH_EDGE (b)->dest->index);
286738fd1498Szrj 		      redirect_edge_succ (BRANCH_EDGE (b),
286838fd1498Szrj 					  EXIT_BLOCK_PTR_FOR_FN (cfun));
286938fd1498Szrj 		      BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
287038fd1498Szrj 		      changed_here = true;
287138fd1498Szrj 		    }
287238fd1498Szrj 		}
287338fd1498Szrj 
287438fd1498Szrj 	      /* Try to flip a conditional branch that falls through to
287538fd1498Szrj 		 a return so that it becomes a conditional return and a
287638fd1498Szrj 		 new jump to the original branch target.  */
287738fd1498Szrj 	      if (EDGE_COUNT (b->succs) == 2
287838fd1498Szrj 		  && BRANCH_EDGE (b)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
287938fd1498Szrj 		  && any_condjump_p (BB_END (b))
288038fd1498Szrj 		  && bb_is_just_return (FALLTHRU_EDGE (b)->dest, &ret, &use))
288138fd1498Szrj 		{
288238fd1498Szrj 		  if (invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
288338fd1498Szrj 				   JUMP_LABEL (BB_END (b)), 0))
288438fd1498Szrj 		    {
288538fd1498Szrj 		      basic_block new_ft = BRANCH_EDGE (b)->dest;
288638fd1498Szrj 		      if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
288738fd1498Szrj 					 PATTERN (ret), 0))
288838fd1498Szrj 			{
288938fd1498Szrj 			  if (use)
289038fd1498Szrj 			    emit_insn_before (copy_insn (PATTERN (use)),
289138fd1498Szrj 					      BB_END (b));
289238fd1498Szrj 			  if (dump_file)
289338fd1498Szrj 			    fprintf (dump_file, "Changed conditional jump "
289438fd1498Szrj 				     "%d->%d to conditional return, adding "
289538fd1498Szrj 				     "fall-through jump.\n",
289638fd1498Szrj 				     b->index, BRANCH_EDGE (b)->dest->index);
289738fd1498Szrj 			  redirect_edge_succ (BRANCH_EDGE (b),
289838fd1498Szrj 					      EXIT_BLOCK_PTR_FOR_FN (cfun));
289938fd1498Szrj 			  BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
290038fd1498Szrj 			  std::swap (BRANCH_EDGE (b)->probability,
290138fd1498Szrj 				     FALLTHRU_EDGE (b)->probability);
290238fd1498Szrj 			  update_br_prob_note (b);
290338fd1498Szrj 			  basic_block jb = force_nonfallthru (FALLTHRU_EDGE (b));
290438fd1498Szrj 			  notice_new_block (jb);
290538fd1498Szrj 			  if (!redirect_jump (as_a <rtx_jump_insn *> (BB_END (jb)),
290638fd1498Szrj 					      block_label (new_ft), 0))
290738fd1498Szrj 			    gcc_unreachable ();
290838fd1498Szrj 			  redirect_edge_succ (single_succ_edge (jb), new_ft);
290938fd1498Szrj 			  changed_here = true;
291038fd1498Szrj 			}
291138fd1498Szrj 		      else
291238fd1498Szrj 			{
291338fd1498Szrj 			  /* Invert the jump back to what it was.  This should
291438fd1498Szrj 			     never fail.  */
291538fd1498Szrj 			  if (!invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
291638fd1498Szrj 					    JUMP_LABEL (BB_END (b)), 0))
291738fd1498Szrj 			    gcc_unreachable ();
291838fd1498Szrj 			}
291938fd1498Szrj 		    }
292038fd1498Szrj 		}
292138fd1498Szrj 
292238fd1498Szrj 	      /* Simplify branch over branch.  */
292338fd1498Szrj 	      if ((mode & CLEANUP_EXPENSIVE)
292438fd1498Szrj 		   && !(mode & CLEANUP_CFGLAYOUT)
292538fd1498Szrj 		   && try_simplify_condjump (b))
292638fd1498Szrj 		changed_here = true;
292738fd1498Szrj 
292838fd1498Szrj 	      /* If B has a single outgoing edge, but uses a
292938fd1498Szrj 		 non-trivial jump instruction without side-effects, we
293038fd1498Szrj 		 can either delete the jump entirely, or replace it
293138fd1498Szrj 		 with a simple unconditional jump.  */
293238fd1498Szrj 	      if (single_succ_p (b)
293338fd1498Szrj 		  && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
293438fd1498Szrj 		  && onlyjump_p (BB_END (b))
293538fd1498Szrj 		  && !CROSSING_JUMP_P (BB_END (b))
293638fd1498Szrj 		  && try_redirect_by_replacing_jump (single_succ_edge (b),
293738fd1498Szrj 						     single_succ (b),
293838fd1498Szrj 						     (mode & CLEANUP_CFGLAYOUT) != 0))
293938fd1498Szrj 		{
294038fd1498Szrj 		  update_forwarder_flag (b);
294138fd1498Szrj 		  changed_here = true;
294238fd1498Szrj 		}
294338fd1498Szrj 
294438fd1498Szrj 	      /* Simplify branch to branch.  */
294538fd1498Szrj 	      if (try_forward_edges (mode, b))
294638fd1498Szrj 		{
294738fd1498Szrj 		  update_forwarder_flag (b);
294838fd1498Szrj 		  changed_here = true;
294938fd1498Szrj 		}
295038fd1498Szrj 
295138fd1498Szrj 	      /* Look for shared code between blocks.  */
295238fd1498Szrj 	      if ((mode & CLEANUP_CROSSJUMP)
295338fd1498Szrj 		  && try_crossjump_bb (mode, b))
295438fd1498Szrj 		changed_here = true;
295538fd1498Szrj 
295638fd1498Szrj 	      if ((mode & CLEANUP_CROSSJUMP)
295738fd1498Szrj 		  /* This can lengthen register lifetimes.  Do it only after
295838fd1498Szrj 		     reload.  */
295938fd1498Szrj 		  && reload_completed
296038fd1498Szrj 		  && try_head_merge_bb (b))
296138fd1498Szrj 		changed_here = true;
296238fd1498Szrj 
296338fd1498Szrj 	      /* Don't get confused by the index shift caused by
296438fd1498Szrj 		 deleting blocks.  */
296538fd1498Szrj 	      if (!changed_here)
296638fd1498Szrj 		b = b->next_bb;
296738fd1498Szrj 	      else
296838fd1498Szrj 		changed = true;
296938fd1498Szrj 	    }
297038fd1498Szrj 
297138fd1498Szrj 	  if ((mode & CLEANUP_CROSSJUMP)
297238fd1498Szrj 	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
297338fd1498Szrj 	    changed = true;
297438fd1498Szrj 
297538fd1498Szrj 	  if (block_was_dirty)
297638fd1498Szrj 	    {
297738fd1498Szrj 	      /* This should only be set by head-merging.  */
297838fd1498Szrj 	      gcc_assert (mode & CLEANUP_CROSSJUMP);
297938fd1498Szrj 	      df_analyze ();
298038fd1498Szrj 	    }
298138fd1498Szrj 
298238fd1498Szrj 	  if (changed)
298338fd1498Szrj             {
298438fd1498Szrj               /* Edge forwarding in particular can cause hot blocks previously
298538fd1498Szrj                  reached by both hot and cold blocks to become dominated only
298638fd1498Szrj                  by cold blocks. This will cause the verification below to fail,
298738fd1498Szrj                  and lead to now cold code in the hot section. This is not easy
298838fd1498Szrj                  to detect and fix during edge forwarding, and in some cases
298938fd1498Szrj                  is only visible after newly unreachable blocks are deleted,
299038fd1498Szrj                  which will be done in fixup_partitions.  */
299138fd1498Szrj 	      if ((mode & CLEANUP_NO_PARTITIONING) == 0)
299238fd1498Szrj 		{
299338fd1498Szrj 		  fixup_partitions ();
299438fd1498Szrj 	          checking_verify_flow_info ();
299538fd1498Szrj 		}
299638fd1498Szrj             }
299738fd1498Szrj 
299838fd1498Szrj 	  changed_overall |= changed;
299938fd1498Szrj 	  first_pass = false;
300038fd1498Szrj 	}
300138fd1498Szrj       while (changed);
300238fd1498Szrj     }
300338fd1498Szrj 
300438fd1498Szrj   FOR_ALL_BB_FN (b, cfun)
300538fd1498Szrj     b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
300638fd1498Szrj 
300738fd1498Szrj   return changed_overall;
300838fd1498Szrj }
300938fd1498Szrj 
/* Delete all unreachable basic blocks.  Return true if any block was
   removed.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, prev_bb;

  /* Mark every block reachable from the entry block with BB_REACHABLE;
     blocks lacking the flag below are dead.  */
  find_unreachable_blocks ();

  /* When we're in GIMPLE mode and there may be debug bind insns, we
     should delete blocks in reverse dominator order, so as to get a
     chance to substitute all released DEFs into debug bind stmts.  If
     we don't have dominators information, walking blocks backward
     gets us a better chance of retaining most debug information than
     otherwise.  */
  if (MAY_HAVE_DEBUG_BIND_INSNS && current_ir_type () == IR_GIMPLE
      && dom_info_available_p (CDI_DOMINATORS))
    {
      for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
	{
	  /* Record the predecessor now: deleting B unlinks it from the
	     block chain.  */
	  prev_bb = b->prev_bb;

	  if (!(b->flags & BB_REACHABLE))
	    {
	      /* Speed up the removal of blocks that don't dominate
		 others.  Walking backwards, this should be the common
		 case.  */
	      if (!first_dom_son (CDI_DOMINATORS, b))
		delete_basic_block (b);
	      else
		{
		  vec<basic_block> h
		    = get_all_dominated_blocks (CDI_DOMINATORS, b);

		  while (h.length ())
		    {
		      b = h.pop ();

		      /* Refresh PREV_BB: the block recorded above may
			 itself be in H and get deleted here.  */
		      prev_bb = b->prev_bb;

		      /* Everything dominated by an unreachable block
			 must itself be unreachable.  */
		      gcc_assert (!(b->flags & BB_REACHABLE));

		      delete_basic_block (b);
		    }

		  h.release ();
		}

	      changed = true;
	    }
	}
    }
  else
    {
      /* No dominator info (or not GIMPLE with debug binds): simply walk
	 the block chain backwards, deleting unreachable blocks one by
	 one.  */
      for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
	{
	  prev_bb = b->prev_bb;

	  if (!(b->flags & BB_REACHABLE))
	    {
	      delete_basic_block (b);
	      changed = true;
	    }
	}
    }

  /* Removing blocks may have exposed redundant fallthru jumps.  */
  if (changed)
    tidy_fallthru_edges ();
  return changed;
}
308338fd1498Szrj 
308438fd1498Szrj /* Delete any jump tables never referenced.  We can't delete them at the
308538fd1498Szrj    time of removing tablejump insn as they are referenced by the preceding
308638fd1498Szrj    insns computing the destination, so we delay deleting and garbagecollect
308738fd1498Szrj    them once life information is computed.  */
308838fd1498Szrj void
delete_dead_jumptables(void)308938fd1498Szrj delete_dead_jumptables (void)
309038fd1498Szrj {
309138fd1498Szrj   basic_block bb;
309238fd1498Szrj 
309338fd1498Szrj   /* A dead jump table does not belong to any basic block.  Scan insns
309438fd1498Szrj      between two adjacent basic blocks.  */
309538fd1498Szrj   FOR_EACH_BB_FN (bb, cfun)
309638fd1498Szrj     {
309738fd1498Szrj       rtx_insn *insn, *next;
309838fd1498Szrj 
309938fd1498Szrj       for (insn = NEXT_INSN (BB_END (bb));
310038fd1498Szrj 	   insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
310138fd1498Szrj 	   insn = next)
310238fd1498Szrj 	{
310338fd1498Szrj 	  next = NEXT_INSN (insn);
310438fd1498Szrj 	  if (LABEL_P (insn)
310538fd1498Szrj 	      && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
310638fd1498Szrj 	      && JUMP_TABLE_DATA_P (next))
310738fd1498Szrj 	    {
310838fd1498Szrj 	      rtx_insn *label = insn, *jump = next;
310938fd1498Szrj 
311038fd1498Szrj 	      if (dump_file)
311138fd1498Szrj 		fprintf (dump_file, "Dead jumptable %i removed\n",
311238fd1498Szrj 			 INSN_UID (insn));
311338fd1498Szrj 
311438fd1498Szrj 	      next = NEXT_INSN (next);
311538fd1498Szrj 	      delete_insn (jump);
311638fd1498Szrj 	      delete_insn (label);
311738fd1498Szrj 	    }
311838fd1498Szrj 	}
311938fd1498Szrj     }
312038fd1498Szrj }
312138fd1498Szrj 
312238fd1498Szrj 
312338fd1498Szrj /* Tidy the CFG by deleting unreachable code and whatnot.  */
312438fd1498Szrj 
312538fd1498Szrj bool
cleanup_cfg(int mode)312638fd1498Szrj cleanup_cfg (int mode)
312738fd1498Szrj {
312838fd1498Szrj   bool changed = false;
312938fd1498Szrj 
313038fd1498Szrj   /* Set the cfglayout mode flag here.  We could update all the callers
313138fd1498Szrj      but that is just inconvenient, especially given that we eventually
313238fd1498Szrj      want to have cfglayout mode as the default.  */
313338fd1498Szrj   if (current_ir_type () == IR_RTL_CFGLAYOUT)
313438fd1498Szrj     mode |= CLEANUP_CFGLAYOUT;
313538fd1498Szrj 
313638fd1498Szrj   timevar_push (TV_CLEANUP_CFG);
313738fd1498Szrj   if (delete_unreachable_blocks ())
313838fd1498Szrj     {
313938fd1498Szrj       changed = true;
314038fd1498Szrj       /* We've possibly created trivially dead code.  Cleanup it right
314138fd1498Szrj 	 now to introduce more opportunities for try_optimize_cfg.  */
314238fd1498Szrj       if (!(mode & (CLEANUP_NO_INSN_DEL))
314338fd1498Szrj 	  && !reload_completed)
314438fd1498Szrj 	delete_trivially_dead_insns (get_insns (), max_reg_num ());
314538fd1498Szrj     }
314638fd1498Szrj 
314738fd1498Szrj   compact_blocks ();
314838fd1498Szrj 
314938fd1498Szrj   /* To tail-merge blocks ending in the same noreturn function (e.g.
315038fd1498Szrj      a call to abort) we have to insert fake edges to exit.  Do this
315138fd1498Szrj      here once.  The fake edges do not interfere with any other CFG
315238fd1498Szrj      cleanups.  */
315338fd1498Szrj   if (mode & CLEANUP_CROSSJUMP)
315438fd1498Szrj     add_noreturn_fake_exit_edges ();
315538fd1498Szrj 
315638fd1498Szrj   if (!dbg_cnt (cfg_cleanup))
315738fd1498Szrj     return changed;
315838fd1498Szrj 
315938fd1498Szrj   while (try_optimize_cfg (mode))
316038fd1498Szrj     {
316138fd1498Szrj       delete_unreachable_blocks (), changed = true;
316238fd1498Szrj       if (!(mode & CLEANUP_NO_INSN_DEL))
316338fd1498Szrj 	{
316438fd1498Szrj 	  /* Try to remove some trivially dead insns when doing an expensive
316538fd1498Szrj 	     cleanup.  But delete_trivially_dead_insns doesn't work after
316638fd1498Szrj 	     reload (it only handles pseudos) and run_fast_dce is too costly
316738fd1498Szrj 	     to run in every iteration.
316838fd1498Szrj 
316938fd1498Szrj 	     For effective cross jumping, we really want to run a fast DCE to
317038fd1498Szrj 	     clean up any dead conditions, or they get in the way of performing
317138fd1498Szrj 	     useful tail merges.
317238fd1498Szrj 
317338fd1498Szrj 	     Other transformations in cleanup_cfg are not so sensitive to dead
317438fd1498Szrj 	     code, so delete_trivially_dead_insns or even doing nothing at all
317538fd1498Szrj 	     is good enough.  */
317638fd1498Szrj 	  if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
317738fd1498Szrj 	      && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
317838fd1498Szrj 	    break;
317938fd1498Szrj 	  if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occurred)
318038fd1498Szrj 	    run_fast_dce ();
318138fd1498Szrj 	}
318238fd1498Szrj       else
318338fd1498Szrj 	break;
318438fd1498Szrj     }
318538fd1498Szrj 
318638fd1498Szrj   if (mode & CLEANUP_CROSSJUMP)
318738fd1498Szrj     remove_fake_exit_edges ();
318838fd1498Szrj 
318938fd1498Szrj   /* Don't call delete_dead_jumptables in cfglayout mode, because
319038fd1498Szrj      that function assumes that jump tables are in the insns stream.
319138fd1498Szrj      But we also don't _have_ to delete dead jumptables in cfglayout
319238fd1498Szrj      mode because we shouldn't even be looking at things that are
319338fd1498Szrj      not in a basic block.  Dead jumptables are cleaned up when
319438fd1498Szrj      going out of cfglayout mode.  */
319538fd1498Szrj   if (!(mode & CLEANUP_CFGLAYOUT))
319638fd1498Szrj     delete_dead_jumptables ();
319738fd1498Szrj 
319838fd1498Szrj   /* ???  We probably do this way too often.  */
319938fd1498Szrj   if (current_loops
320038fd1498Szrj       && (changed
320138fd1498Szrj 	  || (mode & CLEANUP_CFG_CHANGED)))
320238fd1498Szrj     {
320338fd1498Szrj       timevar_push (TV_REPAIR_LOOPS);
320438fd1498Szrj       /* The above doesn't preserve dominance info if available.  */
320538fd1498Szrj       gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
320638fd1498Szrj       calculate_dominance_info (CDI_DOMINATORS);
320738fd1498Szrj       fix_loop_structure (NULL);
320838fd1498Szrj       free_dominance_info (CDI_DOMINATORS);
320938fd1498Szrj       timevar_pop (TV_REPAIR_LOOPS);
321038fd1498Szrj     }
321138fd1498Szrj 
321238fd1498Szrj   timevar_pop (TV_CLEANUP_CFG);
321338fd1498Szrj 
321438fd1498Szrj   return changed;
321538fd1498Szrj }
321638fd1498Szrj 
namespace {

/* Pass descriptor for the "jump" RTL pass, handed to the pass manager
   through the pass_jump constructor below.  */
const pass_data pass_data_jump =
{
  RTL_PASS, /* type */
  "jump", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* The "jump" pass: trivial dead-insn removal followed by a CFG cleanup
   (see execute below).  Instantiated via make_pass_jump.  */
class pass_jump : public rtl_opt_pass
{
public:
  pass_jump (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_jump, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_jump

/* Main entry point of the pass: delete trivially dead insns, dump flow
   info when a dump file is active, then run cleanup_cfg — with
   CLEANUP_EXPENSIVE when optimizing and CLEANUP_THREADING when
   -fthread-jumps is enabled.  Always returns 0 (no TODO flags).  */
unsigned int
pass_jump::execute (function *)
{
  delete_trivially_dead_insns (get_insns (), max_reg_num ());
  if (dump_file)
    dump_flow_info (dump_file, dump_flags);
  cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
	       | (flag_thread_jumps ? CLEANUP_THREADING : 0));
  return 0;
}

} // anon namespace
325638fd1498Szrj 
325738fd1498Szrj rtl_opt_pass *
make_pass_jump(gcc::context * ctxt)325838fd1498Szrj make_pass_jump (gcc::context *ctxt)
325938fd1498Szrj {
326038fd1498Szrj   return new pass_jump (ctxt);
326138fd1498Szrj }
326238fd1498Szrj 
namespace {

/* Pass descriptor for the "jump2" RTL pass.  */
const pass_data pass_data_jump2 =
{
  RTL_PASS, /* type */
  "jump2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* The "jump2" pass: a later CFG cleanup that additionally performs
   cross jumping when -fcrossjumping is enabled.  */
class pass_jump2 : public rtl_opt_pass
{
public:
  pass_jump2 (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_jump2, ctxt)
  {}

  /* opt_pass methods: */
  /* Run cleanup_cfg, requesting cross jumping if the flag is set.
     Always returns 0 (no TODO flags).  */
  virtual unsigned int execute (function *)
    {
      cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
      return 0;
    }

}; // class pass_jump2

} // anon namespace
329538fd1498Szrj 
329638fd1498Szrj rtl_opt_pass *
make_pass_jump2(gcc::context * ctxt)329738fd1498Szrj make_pass_jump2 (gcc::context *ctxt)
329838fd1498Szrj {
329938fd1498Szrj   return new pass_jump2 (ctxt);
330038fd1498Szrj }
3301