138fd1498Szrj /* CFG cleanup for trees.
238fd1498Szrj    Copyright (C) 2001-2018 Free Software Foundation, Inc.
338fd1498Szrj 
438fd1498Szrj This file is part of GCC.
538fd1498Szrj 
638fd1498Szrj GCC is free software; you can redistribute it and/or modify
738fd1498Szrj it under the terms of the GNU General Public License as published by
838fd1498Szrj the Free Software Foundation; either version 3, or (at your option)
938fd1498Szrj any later version.
1038fd1498Szrj 
1138fd1498Szrj GCC is distributed in the hope that it will be useful,
1238fd1498Szrj but WITHOUT ANY WARRANTY; without even the implied warranty of
1338fd1498Szrj MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
1438fd1498Szrj GNU General Public License for more details.
1538fd1498Szrj 
1638fd1498Szrj You should have received a copy of the GNU General Public License
1738fd1498Szrj along with GCC; see the file COPYING3.  If not see
1838fd1498Szrj <http://www.gnu.org/licenses/>.  */
1938fd1498Szrj 
2038fd1498Szrj #include "config.h"
2138fd1498Szrj #include "system.h"
2238fd1498Szrj #include "coretypes.h"
2338fd1498Szrj #include "backend.h"
2438fd1498Szrj #include "rtl.h"
2538fd1498Szrj #include "tree.h"
2638fd1498Szrj #include "gimple.h"
2738fd1498Szrj #include "cfghooks.h"
2838fd1498Szrj #include "tree-pass.h"
2938fd1498Szrj #include "ssa.h"
3038fd1498Szrj #include "diagnostic-core.h"
3138fd1498Szrj #include "fold-const.h"
3238fd1498Szrj #include "cfganal.h"
3338fd1498Szrj #include "cfgcleanup.h"
3438fd1498Szrj #include "tree-eh.h"
3538fd1498Szrj #include "gimplify.h"
3638fd1498Szrj #include "gimple-iterator.h"
3738fd1498Szrj #include "tree-cfg.h"
3838fd1498Szrj #include "tree-ssa-loop-manip.h"
3938fd1498Szrj #include "tree-dfa.h"
4038fd1498Szrj #include "tree-ssa.h"
4138fd1498Szrj #include "cfgloop.h"
4238fd1498Szrj #include "tree-scalar-evolution.h"
4338fd1498Szrj #include "gimple-match.h"
4438fd1498Szrj #include "gimple-fold.h"
4538fd1498Szrj #include "tree-ssa-loop-niter.h"
46*e215fc28Szrj #include "tree-into-ssa.h"
47*e215fc28Szrj #include "tree-cfgcleanup.h"
4838fd1498Szrj 
4938fd1498Szrj 
/* The set of blocks in which at least one of the following changes happened:
   -- the statement at the end of the block was changed
   -- the block was newly created
   -- the set of the predecessors of the block changed
   -- the set of the successors of the block changed
   ??? Maybe we could track these changes separately, since they determine
       what cleanups it makes sense to try on the block.  */
5738fd1498Szrj bitmap cfgcleanup_altered_bbs;
5838fd1498Szrj 
5938fd1498Szrj /* Remove any fallthru edge from EV.  Return true if an edge was removed.  */
6038fd1498Szrj 
6138fd1498Szrj static bool
remove_fallthru_edge(vec<edge,va_gc> * ev)6238fd1498Szrj remove_fallthru_edge (vec<edge, va_gc> *ev)
6338fd1498Szrj {
6438fd1498Szrj   edge_iterator ei;
6538fd1498Szrj   edge e;
6638fd1498Szrj 
6738fd1498Szrj   FOR_EACH_EDGE (e, ei, ev)
6838fd1498Szrj     if ((e->flags & EDGE_FALLTHRU) != 0)
6938fd1498Szrj       {
7038fd1498Szrj 	if (e->flags & EDGE_COMPLEX)
7138fd1498Szrj 	  e->flags &= ~EDGE_FALLTHRU;
7238fd1498Szrj 	else
7338fd1498Szrj 	  remove_edge_and_dominated_blocks (e);
7438fd1498Szrj 	return true;
7538fd1498Szrj       }
7638fd1498Szrj   return false;
7738fd1498Szrj }
7838fd1498Szrj 
7938fd1498Szrj /* Convert a SWTCH with single non-default case to gcond and replace it
8038fd1498Szrj    at GSI.  */
8138fd1498Szrj 
8238fd1498Szrj static bool
convert_single_case_switch(gswitch * swtch,gimple_stmt_iterator & gsi)8338fd1498Szrj convert_single_case_switch (gswitch *swtch, gimple_stmt_iterator &gsi)
8438fd1498Szrj {
8538fd1498Szrj   if (gimple_switch_num_labels (swtch) != 2)
8638fd1498Szrj     return false;
8738fd1498Szrj 
8838fd1498Szrj   tree index = gimple_switch_index (swtch);
8938fd1498Szrj   tree default_label = CASE_LABEL (gimple_switch_default_label (swtch));
9038fd1498Szrj   tree label = gimple_switch_label (swtch, 1);
9138fd1498Szrj   tree low = CASE_LOW (label);
9238fd1498Szrj   tree high = CASE_HIGH (label);
9338fd1498Szrj 
9438fd1498Szrj   basic_block default_bb = label_to_block_fn (cfun, default_label);
9538fd1498Szrj   basic_block case_bb = label_to_block_fn (cfun, CASE_LABEL (label));
9638fd1498Szrj 
9738fd1498Szrj   basic_block bb = gimple_bb (swtch);
9838fd1498Szrj   gcond *cond;
9938fd1498Szrj 
10038fd1498Szrj   /* Replace switch statement with condition statement.  */
10138fd1498Szrj   if (high)
10238fd1498Szrj     {
10338fd1498Szrj       tree lhs, rhs;
10438fd1498Szrj       generate_range_test (bb, index, low, high, &lhs, &rhs);
10538fd1498Szrj       cond = gimple_build_cond (LE_EXPR, lhs, rhs, NULL_TREE, NULL_TREE);
10638fd1498Szrj     }
10738fd1498Szrj   else
10838fd1498Szrj     cond = gimple_build_cond (EQ_EXPR, index,
10938fd1498Szrj 			      fold_convert (TREE_TYPE (index), low),
11038fd1498Szrj 			      NULL_TREE, NULL_TREE);
11138fd1498Szrj 
11238fd1498Szrj   gsi_replace (&gsi, cond, true);
11338fd1498Szrj 
11438fd1498Szrj   /* Update edges.  */
11538fd1498Szrj   edge case_edge = find_edge (bb, case_bb);
11638fd1498Szrj   edge default_edge = find_edge (bb, default_bb);
11738fd1498Szrj 
11838fd1498Szrj   case_edge->flags |= EDGE_TRUE_VALUE;
11938fd1498Szrj   default_edge->flags |= EDGE_FALSE_VALUE;
12038fd1498Szrj   return true;
12138fd1498Szrj }
12238fd1498Szrj 
/* Disconnect an unreachable block in the control expression starting
   at block BB.  BB ends in the GIMPLE_COND or GIMPLE_SWITCH located at
   GSI; if its controlling value can be determined at compile time, all
   edges except the taken one are removed and the control statement is
   deleted.  Returns true if any edge was removed.  */

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
  edge taken_edge;
  bool retval = false;
  gimple *stmt = gsi_stmt (gsi);

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      /* Stays NULL_TREE unless the controlling expression is known.  */
      tree val = NULL_TREE;

      /* Try to convert a switch with just a single non-default case to
	 GIMPLE condition.  */
      if (gimple_code (stmt) == GIMPLE_SWITCH
	  && convert_single_case_switch (as_a<gswitch *> (stmt), gsi))
	stmt = gsi_stmt (gsi);

      /* Folding may emit strict-overflow warnings; defer them so they
	 can be dropped if no edge ends up being removed.  */
      fold_defer_overflow_warnings ();
      switch (gimple_code (stmt))
	{
	case GIMPLE_COND:
	  {
	    /* Simplify without following SSA edges; only accept a
	       compile-time constant result.  */
	    code_helper rcode;
	    tree ops[3] = {};
	    if (gimple_simplify (stmt, &rcode, ops, NULL, no_follow_ssa_edges,
				 no_follow_ssa_edges)
		&& rcode == INTEGER_CST)
	      val = ops[0];
	  }
	  break;

	case GIMPLE_SWITCH:
	  val = gimple_switch_index (as_a <gswitch *> (stmt));
	  break;

	default:
	  ;
	}
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
	{
	  /* Not statically determined: keep all edges, discard the
	     deferred warnings.  */
	  fold_undefer_and_ignore_overflow_warnings ();
	  return false;
	}

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
	{
	  if (e != taken_edge)
	    {
	      /* Emit the deferred overflow warning (once) the first
		 time an edge is actually deleted.  */
	      if (!warned)
		{
		  fold_undefer_overflow_warnings
		    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
		  warned = true;
		}

	      /* Fold the removed edge's profile into the surviving
		 edge before deleting it.  */
	      taken_edge->probability += e->probability;
	      remove_edge_and_dominated_blocks (e);
	      retval = true;
	    }
	  else
	    ei_next (&ei);
	}
      if (!warned)
	fold_undefer_and_ignore_overflow_warnings ();
    }
  else
    taken_edge = single_succ_edge (bb);

  /* BB now transfers control unconditionally; record the change,
     delete the control statement and mark the edge as fallthru.  */
  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}
20638fd1498Szrj 
20738fd1498Szrj /* Cleanup the GF_CALL_CTRL_ALTERING flag according to
20838fd1498Szrj    to updated gimple_call_flags.  */
20938fd1498Szrj 
21038fd1498Szrj static void
cleanup_call_ctrl_altering_flag(gimple * bb_end)21138fd1498Szrj cleanup_call_ctrl_altering_flag (gimple *bb_end)
21238fd1498Szrj {
21338fd1498Szrj   if (!is_gimple_call (bb_end)
21438fd1498Szrj       || !gimple_call_ctrl_altering_p (bb_end))
21538fd1498Szrj     return;
21638fd1498Szrj 
21738fd1498Szrj   int flags = gimple_call_flags (bb_end);
21838fd1498Szrj   if (((flags & (ECF_CONST | ECF_PURE))
21938fd1498Szrj        && !(flags & ECF_LOOPING_CONST_OR_PURE))
22038fd1498Szrj       || (flags & ECF_LEAF))
22138fd1498Szrj     gimple_call_set_ctrl_altering (bb_end, false);
22238fd1498Szrj }
22338fd1498Szrj 
/* Try to remove superfluous control structures in basic block BB.
   Handles three cases on the last statement: statically-decidable
   conds/switches, computed gotos with a compile-time destination, and
   calls discovered to be noreturn.  Returns true if anything changes.  */

static bool
cleanup_control_flow_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple *stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune cfg.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_nondebug_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  /* Try to cleanup ctrl altering flag for call which ends bb.  */
  cleanup_call_ctrl_altering_flag (stmt);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      /* Control statements must be the very last statement.  */
      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      retval |= cleanup_control_expr_graph (bb, gsi);
    }
  else if (gimple_code (stmt) == GIMPLE_GOTO
	   && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
	   && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
	       == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
	 destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      /* First look at all the outgoing edges.  Delete any outgoing
	 edges which do not go to the right block.  For the one
	 edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      /* Don't touch labels that belong to a different function.  */
      if (DECL_CONTEXT (label) != cfun->decl)
	return retval;
      target_block = label_to_block (label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
	{
	  if (e->dest != target_block)
	    remove_edge_and_dominated_blocks (e);
	  else
	    {
	      /* Turn off the EDGE_ABNORMAL flag.  */
	      e->flags &= ~EDGE_ABNORMAL;

	      /* And set EDGE_FALLTHRU.  */
	      e->flags |= EDGE_FALLTHRU;
	      ei_next (&ei);
	    }
	}

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
	 relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
	   && gimple_call_noreturn_p (stmt))
    {
      /* If there are debug stmts after the noreturn call, remove them
	 now, they should be all unreachable anyway.  */
      for (gsi_next (&gsi); !gsi_end_p (gsi); )
	gsi_remove (&gsi, true);
      if (remove_fallthru_edge (bb->succs))
	retval = true;
    }

  return retval;
}
31238fd1498Szrj 
/* Return true if basic block BB does nothing except pass control
   flow to another block and that we can safely insert a label at
   the start of the successor block.  If PHI_WANTED is true BB must
   contain PHI nodes; otherwise it must contain none.

   As a precondition, we require that BB be not equal to
   the entry block.  */

static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;

  /* BB must have a single outgoing edge.  */
  if (single_succ_p (bb) != 1
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
	 Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of the exit block.  */
      || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

  gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));

  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  {
    edge_iterator ei;
    edge e;

    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
	return false;
      /* If goto_locus of any of the edges differs, prevent removing
	 the forwarder block for -O0.  */
      else if (optimize == 0 && e->goto_locus != locus)
	return false;
  }

  /* Now walk through the statements backward.  We can ignore labels,
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
	{
	case GIMPLE_LABEL:
	  /* Nonlocal labels pin the block; at -O0 a label whose
	     location differs from the edge's would lose debug info.  */
	  if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt))))
	    return false;
	  if (optimize == 0 && gimple_location (stmt) != locus)
	    return false;
	  break;

	  /* ??? For now, hope there's a corresponding debug
	     assignment at the destination.  */
	case GIMPLE_DEBUG:
	  break;

	default:
	  return false;
	}
    }

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop headers.  */
      if (bb_loop_header_p (bb))
	return false;

      dest = EDGE_SUCC (bb, 0)->dest;
      /* Protect loop preheaders and latches if requested.  */
      if (dest->loop_father->header == dest)
	{
	  if (bb->loop_father == dest->loop_father)
	    {
	      if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
		return false;
	      /* If bb doesn't have a single predecessor we'd make this
		 loop have multiple latches.  Don't do that if that
		 would in turn require disambiguating them.  */
	      return (single_pred_p (bb)
		      || loops_state_satisfies_p
		      	   (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
	    }
	  else if (bb->loop_father == loop_outer (dest->loop_father))
	    return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
	  /* Always preserve other edges into loop headers that are
	     not simple latches or preheaders.  */
	  return false;
	}
    }

  return true;
}
41438fd1498Szrj 
41538fd1498Szrj /* If all the PHI nodes in DEST have alternatives for E1 and E2 and
41638fd1498Szrj    those alternatives are equal in each of the PHI nodes, then return
41738fd1498Szrj    true, else return false.  */
41838fd1498Szrj 
41938fd1498Szrj static bool
phi_alternatives_equal(basic_block dest,edge e1,edge e2)42038fd1498Szrj phi_alternatives_equal (basic_block dest, edge e1, edge e2)
42138fd1498Szrj {
42238fd1498Szrj   int n1 = e1->dest_idx;
42338fd1498Szrj   int n2 = e2->dest_idx;
42438fd1498Szrj   gphi_iterator gsi;
42538fd1498Szrj 
42638fd1498Szrj   for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
42738fd1498Szrj     {
42838fd1498Szrj       gphi *phi = gsi.phi ();
42938fd1498Szrj       tree val1 = gimple_phi_arg_def (phi, n1);
43038fd1498Szrj       tree val2 = gimple_phi_arg_def (phi, n2);
43138fd1498Szrj 
43238fd1498Szrj       gcc_assert (val1 != NULL_TREE);
43338fd1498Szrj       gcc_assert (val2 != NULL_TREE);
43438fd1498Szrj 
43538fd1498Szrj       if (!operand_equal_for_phi_arg_p (val1, val2))
43638fd1498Szrj 	return false;
43738fd1498Szrj     }
43838fd1498Szrj 
43938fd1498Szrj   return true;
44038fd1498Szrj }
44138fd1498Szrj 
/* Removes forwarder block BB by redirecting all its incoming edges to
   its single successor, moving required labels and debug statements
   there, and updating dominators and loop latch information.  Returns
   false if the removal is unsafe (e.g. mismatched PHI arguments or
   abnormal-edge constraints), true on success.  */

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple *stmt;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool can_move_debug_stmts;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is a
     EH landing pad, do not merge it.  */
  stmt = first_stmt (dest);
  if (stmt)
    if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt))
	  || EH_LANDING_PAD_NR (gimple_label_label (label_stmt)) != 0)
	return false;

  /* If there is an abnormal edge to basic block BB, but not into
     dest, problems might occur during removal of the phi node at out
     of ssa due to overlapping live ranges of registers.

     If there is an abnormal edge in DEST, the problems would occur
     anyway since cleanup_dead_labels would then merge the labels for
     two different eh regions, and rest of exception handling code
     does not like it.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (bb_has_abnormal_pred (bb)
      && (bb_has_abnormal_pred (dest)
	  || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  s = find_edge (e->src, dest);
	  if (!s)
	    continue;

	  if (!phi_alternatives_equal (dest, succ, s))
	    return false;
	}
    }

  can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);

  /* Remember BB's single predecessor (if any) before edges are
     redirected; needed below for the loop-latch fixup.  */
  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
	{
	  /* If there is an abnormal edge, redirect it anyway, and
	     move the labels to the new block to make it legal.  */
	  s = redirect_edge_succ_nodup (e, dest);
	}
      else
	s = redirect_edge_and_branch (e, dest);

      if (s == e)
	{
	  /* Create arguments for the phi nodes, since the edge was not
	     here before.  */
	  for (gphi_iterator psi = gsi_start_phis (dest);
	       !gsi_end_p (psi);
	       gsi_next (&psi))
	    {
	      gphi *phi = psi.phi ();
	      source_location l = gimple_phi_arg_location_from_edge (phi, succ);
	      tree def = gimple_phi_arg_def (phi, succ->dest_idx);
	      add_phi_arg (phi, unshare_expr (def), s, l);
	    }
	}
    }

  /* Move nonlocal labels and computed goto targets as well as user
     defined labels and labels with an EH landing pad number to the
     new block, so that the redirection of the abnormal edges works,
     jump targets end up in a sane place and debug information for
     labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	break;

      /* Forwarder blocks can only contain labels and debug stmts, and
	 labels must come first, so if we get to this point, we know
	 we're looking at a label.  */
      tree decl = gimple_label_label (as_a <glabel *> (stmt));
      if (EH_LANDING_PAD_NR (decl) != 0
	  || DECL_NONLOCAL (decl)
	  || FORCED_LABEL (decl)
	  || !DECL_ARTIFICIAL (decl))
	gsi_move_before (&gsi, &gsi_to);
      else
	gsi_next (&gsi);
    }

  /* Move debug statements if the destination has a single predecessor.  */
  if (can_move_debug_stmts && !gsi_end_p (gsi))
    {
      gsi_to = gsi_after_labels (dest);
      do
	{
	  gimple *debug = gsi_stmt (gsi);
	  gcc_assert (is_gimple_debug (debug));
	  gsi_move_before (&gsi, &gsi_to);
	}
      while (!gsi_end_p (gsi));
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
	{
	  /* Shortcut to avoid calling (relatively expensive)
	     nearest_common_dominator unless necessary.  */
	  dom = dombb;
	}
      else
	dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* Adjust latch infomation of BB's parent loop as otherwise
     the cfg hook has a hard time not to kill the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}
60638fd1498Szrj 
/* STMT is a call that has been discovered noreturn.  Split the
   block to prepare fixing up the CFG and remove LHS.
   Return true if cleanup-cfg needs to run.  */

bool
fixup_noreturn_call (gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  bool changed = false;

  /* __builtin_return transfers control and must keep its block.  */
  if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
    return false;

  /* First split basic block if stmt is not last.  */
  if (stmt != gsi_stmt (gsi_last_bb (bb)))
    {
      if (stmt == gsi_stmt (gsi_last_nondebug_bb (bb)))
	{
	  /* Don't split if there are only debug stmts
	     after stmt, that can result in -fcompare-debug
	     failures.  Remove the debug stmts instead,
	     they should be all unreachable anyway.  */
	  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
	  for (gsi_next (&gsi); !gsi_end_p (gsi); )
	    gsi_remove (&gsi, true);
	}
      else
	{
	  split_block (bb, stmt);
	  changed = true;
	}
    }

  /* If there is an LHS, remove it, but only if its type has fixed size.
     The LHS will need to be recreated during RTL expansion and creating
     temporaries of variable-sized types is not supported.  Also don't
     do this with TREE_ADDRESSABLE types, as assign_temp will abort.
     Drop LHS regardless of TREE_ADDRESSABLE, if the function call
     has been changed into a call that does not return a value, like
     __builtin_unreachable or __cxa_pure_virtual.  */
  tree lhs = gimple_call_lhs (stmt);
  if (lhs
      && (should_remove_lhs_p (lhs)
	  || VOID_TYPE_P (TREE_TYPE (gimple_call_fntype (stmt)))))
    {
      gimple_call_set_lhs (stmt, NULL_TREE);

      /* We need to fix up the SSA name to avoid checking errors.  */
      if (TREE_CODE (lhs) == SSA_NAME)
	{
	  /* Re-home the orphaned SSA name on a fresh variable and make
	     it a default definition with a nop as defining statement.  */
	  tree new_var = create_tmp_reg (TREE_TYPE (lhs));
	  SET_SSA_NAME_VAR_OR_IDENTIFIER (lhs, new_var);
	  SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
	  set_ssa_default_def (cfun, new_var, lhs);
	}

      update_stmt (stmt);
    }

  /* Mark the call as altering control flow.  */
  if (!gimple_call_ctrl_altering_p (stmt))
    {
      gimple_call_set_ctrl_altering (stmt, true);
      changed = true;
    }

  return changed;
}
67538fd1498Szrj 
67638fd1498Szrj /* Return true if we want to merge BB1 and BB2 into a single block.  */
67738fd1498Szrj 
67838fd1498Szrj static bool
want_merge_blocks_p(basic_block bb1,basic_block bb2)67938fd1498Szrj want_merge_blocks_p (basic_block bb1, basic_block bb2)
68038fd1498Szrj {
68138fd1498Szrj   if (!can_merge_blocks_p (bb1, bb2))
68238fd1498Szrj     return false;
68338fd1498Szrj   gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb1);
68438fd1498Szrj   if (gsi_end_p (gsi) || !stmt_can_terminate_bb_p (gsi_stmt (gsi)))
68538fd1498Szrj     return true;
68638fd1498Szrj   return bb1->count.ok_for_merging (bb2->count);
68738fd1498Szrj }
68838fd1498Szrj 
68938fd1498Szrj 
69038fd1498Szrj /* Tries to cleanup cfg in basic block BB.  Returns true if anything
69138fd1498Szrj    changes.  */
69238fd1498Szrj 
69338fd1498Szrj static bool
cleanup_tree_cfg_bb(basic_block bb)69438fd1498Szrj cleanup_tree_cfg_bb (basic_block bb)
69538fd1498Szrj {
69638fd1498Szrj   if (tree_forwarder_block_p (bb, false)
69738fd1498Szrj       && remove_forwarder_block (bb))
69838fd1498Szrj     return true;
69938fd1498Szrj 
70038fd1498Szrj   /* If there is a merge opportunity with the predecessor
70138fd1498Szrj      do nothing now but wait until we process the predecessor.
70238fd1498Szrj      This happens when we visit BBs in a non-optimal order and
70338fd1498Szrj      avoids quadratic behavior with adjusting stmts BB pointer.  */
70438fd1498Szrj   if (single_pred_p (bb)
70538fd1498Szrj       && want_merge_blocks_p (single_pred (bb), bb))
70638fd1498Szrj     /* But make sure we _do_ visit it.  When we remove unreachable paths
70738fd1498Szrj        ending in a backedge we fail to mark the destinations predecessors
70838fd1498Szrj        as changed.  */
70938fd1498Szrj     bitmap_set_bit (cfgcleanup_altered_bbs, single_pred (bb)->index);
71038fd1498Szrj 
71138fd1498Szrj   /* Merging the blocks may create new opportunities for folding
71238fd1498Szrj      conditional branches (due to the elimination of single-valued PHI
71338fd1498Szrj      nodes).  */
71438fd1498Szrj   else if (single_succ_p (bb)
71538fd1498Szrj 	   && want_merge_blocks_p (bb, single_succ (bb)))
71638fd1498Szrj     {
71738fd1498Szrj       merge_blocks (bb, single_succ (bb));
71838fd1498Szrj       return true;
71938fd1498Szrj     }
72038fd1498Szrj 
72138fd1498Szrj   return false;
72238fd1498Szrj }
72338fd1498Szrj 
/* Do cleanup_control_flow_bb in PRE order.  Returns true if any
   cleanup changed the CFG.  */

static bool
cleanup_control_flow_pre ()
{
  bool retval = false;

  /* Iterative depth-first walk starting at the entry block's
     successors.  Each stack entry is an edge iterator into the
     successor list of an already-processed block.  */
  auto_vec<edge_iterator, 20> stack (n_basic_blocks_for_fn (cfun) + 1);
  auto_sbitmap visited (last_basic_block_for_fn (cfun));
  bitmap_clear (visited);

  stack.quick_push (ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs));

  while (! stack.is_empty ())
    {
      /* Look at the edge on the top of the stack.  */
      edge_iterator ei = stack.last ();
      basic_block dest = ei_edge (ei)->dest;

      if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
	  && ! bitmap_bit_p (visited, dest->index))
	{
	  /* First visit of DEST: clean it up before descending into its
	     successors — this gives the pre-order processing.  */
	  bitmap_set_bit (visited, dest->index);
	  retval |= cleanup_control_flow_bb (dest);
	  if (EDGE_COUNT (dest->succs) > 0)
	    stack.quick_push (ei_start (dest->succs));
	}
      else
	{
	  /* DEST already visited (or is the exit block): advance the
	     top-of-stack iterator in place, popping it once the
	     successor list is exhausted.  */
	  if (!ei_one_before_end_p (ei))
	    ei_next (&stack.last ());
	  else
	    stack.pop ();
	}
    }

  return retval;
}
76238fd1498Szrj 
/* Iterate the cfg cleanups, while anything changes.  If
   SSA_UPDATE_FLAGS is nonzero, run an incremental SSA update with
   those flags after the initial control-flow cleanup.  Returns true
   if any transform was applied.  */

static bool
cleanup_tree_cfg_1 (unsigned ssa_update_flags)
{
  bool retval = false;
  basic_block bb;
  unsigned i, n;

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of edge to CASE_LABEL_EXPR.  */
  start_recording_case_labels ();

  /* We cannot use FOR_EACH_BB_FN for the BB iterations below
     since the basic blocks may get removed.  */

  /* Start by iterating over all basic blocks in PRE order looking for
     edge removal opportunities.  Do this first because incoming SSA form
     may be invalid and we want to avoid performing SSA related tasks such
     as propagating out a PHI node during BB merging in that state.  */
  retval |= cleanup_control_flow_pre ();

  /* After doing the above SSA form should be valid (or an update SSA
     should be required).  */
  if (ssa_update_flags)
    update_ssa (ssa_update_flags);

  /* Continue by iterating over all basic blocks looking for BB merging
     opportunities.  */
  n = last_basic_block_for_fn (cfun);
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
	retval |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      /* The entry/exit blocks can never be cleaned up.  */
      if (i < NUM_FIXED_BLOCKS)
	continue;

      /* The block may have been removed since it was queued.  */
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (!bb)
	continue;

      retval |= cleanup_control_flow_bb (bb);
      retval |= cleanup_tree_cfg_bb (bb);
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);
  return retval;
}
82438fd1498Szrj 
82538fd1498Szrj static bool
mfb_keep_latches(edge e)82638fd1498Szrj mfb_keep_latches (edge e)
82738fd1498Szrj {
82838fd1498Szrj   return ! dominated_by_p (CDI_DOMINATORS, e->src, e->dest);
82938fd1498Szrj }
83038fd1498Szrj 
/* Remove unreachable blocks and other miscellaneous clean up work.
   SSA_UPDATE_FLAGS, if nonzero, is forwarded to cleanup_tree_cfg_1
   for an incremental SSA update.  Return true if the flowgraph was
   modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (unsigned ssa_update_flags)
{
  bool changed;

  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Iterate until there are no more cleanups left to do.  If any
     iteration changed the flowgraph, set CHANGED to true.

     If dominance information is available, there cannot be any unreachable
     blocks.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    {
      changed = delete_unreachable_blocks ();
      calculate_dominance_info (CDI_DOMINATORS);
    }
  else
    {
      checking_verify_dominators (CDI_DOMINATORS);
      changed = false;
    }

  /* Ensure that we have single entries into loop headers.  Otherwise
     if one of the entries is becoming a latch due to CFG cleanup
     (from formerly being part of an irreducible region) then we mess
     up loop fixup and associate the old loop with a different region
     which makes niter upper bounds invalid.  See for example PR80549.
     This needs to be done before we remove trivially dead edges as
     we need to capture the dominance state before the pending transform.  */
  if (current_loops)
    {
      loop_p loop;
      unsigned i;
      FOR_EACH_VEC_ELT (*get_loops (cfun), i, loop)
	if (loop && loop->header)
	  {
	    basic_block bb = loop->header;
	    edge_iterator ei;
	    edge e;
	    bool found_latch = false;
	    bool any_abnormal = false;
	    unsigned n = 0;
	    /* We are only interested in preserving existing loops, but
	       we need to check whether they are still real and of course
	       if we need to add a preheader at all.  */
	    FOR_EACH_EDGE (e, ei, bb->preds)
	      {
		/* Abnormal edges cannot be redirected, so give up.  */
		if (e->flags & EDGE_ABNORMAL)
		  {
		    any_abnormal = true;
		    break;
		  }
		/* An edge whose source the header dominates is a latch.  */
		if (dominated_by_p (CDI_DOMINATORS, e->src, bb))
		  {
		    found_latch = true;
		    continue;
		  }
		/* Otherwise it is an entry edge; count it.  */
		n++;
	      }
	    /* If we have more than one entry to the loop header
	       create a forwarder.  */
	    if (found_latch && ! any_abnormal && n > 1)
	      {
		edge fallthru = make_forwarder_block (bb, mfb_keep_latches,
						      NULL);
		loop->header = fallthru->dest;
		if (! loops_state_satisfies_p (LOOPS_NEED_FIXUP))
		  {
		    /* The loop updating from the CFG hook is incomplete
		       when we have multiple latches, fixup manually.  */
		    remove_bb_from_loops (fallthru->src);
		    loop_p cloop = loop;
		    FOR_EACH_EDGE (e, ei, fallthru->src->preds)
		      cloop = find_common_loop (cloop, e->src->loop_father);
		    add_bb_to_loop (fallthru->src, cloop);
		  }
	      }
	  }
    }

  changed |= cleanup_tree_cfg_1 (ssa_update_flags);

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));

  /* Do not renumber blocks if the SCEV cache is active, it is indexed by
     basic-block numbers.  */
  if (! scev_initialized_p ())
    compact_blocks ();

  checking_verify_flow_info ();

  timevar_pop (TV_TREE_CLEANUP_CFG);

  if (changed && current_loops)
    {
      /* Removing edges and/or blocks may make recorded bounds refer
         to stale GIMPLE stmts now, so clear them.  */
      free_numbers_of_iterations_estimates (cfun);
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  return changed;
}
93838fd1498Szrj 
93938fd1498Szrj /* Repairs loop structures.  */
94038fd1498Szrj 
94138fd1498Szrj static void
repair_loop_structures(void)94238fd1498Szrj repair_loop_structures (void)
94338fd1498Szrj {
94438fd1498Szrj   bitmap changed_bbs;
94538fd1498Szrj   unsigned n_new_loops;
94638fd1498Szrj 
94738fd1498Szrj   calculate_dominance_info (CDI_DOMINATORS);
94838fd1498Szrj 
94938fd1498Szrj   timevar_push (TV_REPAIR_LOOPS);
95038fd1498Szrj   changed_bbs = BITMAP_ALLOC (NULL);
95138fd1498Szrj   n_new_loops = fix_loop_structure (changed_bbs);
95238fd1498Szrj 
95338fd1498Szrj   /* This usually does nothing.  But sometimes parts of cfg that originally
95438fd1498Szrj      were inside a loop get out of it due to edge removal (since they
95538fd1498Szrj      become unreachable by back edges from latch).  Also a former
95638fd1498Szrj      irreducible loop can become reducible - in this case force a full
95738fd1498Szrj      rewrite into loop-closed SSA form.  */
95838fd1498Szrj   if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
95938fd1498Szrj     rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
96038fd1498Szrj 				  TODO_update_ssa);
96138fd1498Szrj 
96238fd1498Szrj   BITMAP_FREE (changed_bbs);
96338fd1498Szrj 
96438fd1498Szrj   checking_verify_loop_structure ();
96538fd1498Szrj   scev_reset ();
96638fd1498Szrj 
96738fd1498Szrj   timevar_pop (TV_REPAIR_LOOPS);
96838fd1498Szrj }
96938fd1498Szrj 
97038fd1498Szrj /* Cleanup cfg and repair loop structures.  */
97138fd1498Szrj 
97238fd1498Szrj bool
cleanup_tree_cfg(unsigned ssa_update_flags)973*e215fc28Szrj cleanup_tree_cfg (unsigned ssa_update_flags)
97438fd1498Szrj {
975*e215fc28Szrj   bool changed = cleanup_tree_cfg_noloop (ssa_update_flags);
97638fd1498Szrj 
97738fd1498Szrj   if (current_loops != NULL
97838fd1498Szrj       && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
97938fd1498Szrj     repair_loop_structures ();
98038fd1498Szrj 
98138fd1498Szrj   return changed;
98238fd1498Szrj }
98338fd1498Szrj 
/* Tries to merge the PHI nodes at BB into those at BB's sole successor.
   Returns true if successful.  */

static bool
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple *label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* Removal of forwarders may expose new natural loops and thus
     a block may turn into a loop header.  */
  if (current_loops && bb_loop_header_p (bb))
    return false;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
	return false;

  /* Record BB's single pred in case we need to update the father
     loop's latch information later.  */
  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gphi_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
	{
	  /* We already have an edge S from E->src to DEST.  If S and
	     E->dest's sole successor edge have the same PHI arguments
	     at DEST, redirect S to DEST.  */
	  if (phi_alternatives_equal (dest, s, succ))
	    {
	      e = redirect_edge_and_branch (e, dest);
	      redirect_edge_var_map_clear (e);
	      continue;
	    }

	  /* PHI arguments are different.  Create a forwarder block by
	     splitting E so that we can merge PHI arguments on E to
	     DEST.  */
	  e = single_succ_edge (split_edge (e));
	}
      else
	{
	  /* If we merge the forwarder into a loop header verify if we
	     are creating another loop latch edge.  If so, reset
	     number of iteration information of the loop.  */
	  if (dest->loop_father->header == dest
	      && dominated_by_p (CDI_DOMINATORS, e->src, dest))
	    {
	      dest->loop_father->any_upper_bound = false;
	      dest->loop_father->any_likely_upper_bound = false;
	      free_numbers_of_iterations_estimates (dest->loop_father);
	    }
	}

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
	 destination of E.  */
      for (gsi = gsi_start_phis (dest);
	   !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  tree def = gimple_phi_arg_def (phi, succ->dest_idx);
	  source_location locus = gimple_phi_arg_location_from_edge (phi, succ);

	  if (TREE_CODE (def) == SSA_NAME)
	    {
	      /* If DEF is one of the results of PHI nodes removed during
		 redirection, replace it with the PHI argument that used
		 to be on E.  */
	      vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
	      size_t length = head ? head->length () : 0;
	      for (size_t i = 0; i < length; i++)
		{
		  edge_var_map *vm = &(*head)[i];
		  tree old_arg = redirect_edge_var_map_result (vm);
		  tree new_arg = redirect_edge_var_map_def (vm);

		  if (def == old_arg)
		    {
		      def = new_arg;
		      locus = redirect_edge_var_map_location (vm);
		      break;
		    }
		}
	    }

	  add_phi_arg (phi, def, s, locus);
	}

      /* Drop the PHI argument map recorded for the redirected edge.  */
      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
	 nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not to kill the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);

  return true;
}
112638fd1498Szrj 
112738fd1498Szrj /* This pass merges PHI nodes if one feeds into another.  For example,
112838fd1498Szrj    suppose we have the following:
112938fd1498Szrj 
113038fd1498Szrj   goto <bb 9> (<L9>);
113138fd1498Szrj 
113238fd1498Szrj <L8>:;
113338fd1498Szrj   tem_17 = foo ();
113438fd1498Szrj 
113538fd1498Szrj   # tem_6 = PHI <tem_17(8), tem_23(7)>;
113638fd1498Szrj <L9>:;
113738fd1498Szrj 
113838fd1498Szrj   # tem_3 = PHI <tem_6(9), tem_2(5)>;
113938fd1498Szrj <L10>:;
114038fd1498Szrj 
114138fd1498Szrj   Then we merge the first PHI node into the second one like so:
114238fd1498Szrj 
114338fd1498Szrj   goto <bb 9> (<L10>);
114438fd1498Szrj 
114538fd1498Szrj <L8>:;
114638fd1498Szrj   tem_17 = foo ();
114738fd1498Szrj 
114838fd1498Szrj   # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
114938fd1498Szrj <L10>:;
115038fd1498Szrj */
115138fd1498Szrj 
115238fd1498Szrj namespace {
115338fd1498Szrj 
/* Pass descriptor for the PHI-merging pass; requires CFG and SSA
   properties and runs under the TV_TREE_MERGE_PHI timevar.  */

const pass_data pass_data_merge_phi =
{
  GIMPLE_PASS, /* type */
  "mergephi", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_MERGE_PHI, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* GIMPLE pass that merges the PHI nodes of forwarder blocks into
   those of their successors; see the example above.  */

class pass_merge_phi : public gimple_opt_pass
{
public:
  pass_merge_phi (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_merge_phi, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
  virtual unsigned int execute (function *);

}; // class pass_merge_phi
117938fd1498Szrj 
/* Execute the mergephi pass on function FUN: collect candidate
   forwarder blocks with PHIs into a worklist, then merge each into
   its successor.  Always returns 0 (no TODO flags).  */

unsigned int
pass_merge_phi::execute (function *fun)
{
  /* Worklist of candidate blocks; CURRENT is the one-past-the-end
     pointer into it.  */
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
	continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
	 nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
	  /* We don't want to deal with a basic block with
	     abnormal edges.  */
	  || bb_has_abnormal_pred (bb))
	continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
	{
	  /* If BB does not dominate DEST, then the PHI nodes at
	     DEST must be the only users of the results of the PHI
	     nodes at BB.  */
	  *current++ = bb;
	}
      else
	{
	  gphi_iterator gsi;
	  unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

	  /* BB dominates DEST.  There may be many users of the PHI
	     nodes in BB.  However, there is still a trivial case we
	     can handle.  If the result of every PHI in BB is used
	     only by a PHI in DEST, then we can trivially merge the
	     PHI nodes from BB into DEST.  */
	  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	       gsi_next (&gsi))
	    {
	      gphi *phi = gsi.phi ();
	      tree result = gimple_phi_result (phi);
	      use_operand_p imm_use;
	      gimple *use_stmt;

	      /* If the PHI's result is never used, then we can just
		 ignore it.  */
	      if (has_zero_uses (result))
		continue;

	      /* Get the single use of the result of this PHI node.
		 It must be a PHI at DEST receiving RESULT on the edge
		 from BB; otherwise reject the whole block.  */
	      if (!single_imm_use (result, &imm_use, &use_stmt)
		  || gimple_code (use_stmt) != GIMPLE_PHI
		  || gimple_bb (use_stmt) != dest
		  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
		break;
	    }

	  /* If the loop above iterated through all the PHI nodes
	     in BB, then we can merge the PHIs from BB into DEST.  */
	  if (gsi_end_p (gsi))
	    *current++ = bb;
	}
    }

  /* Now let's drain WORKLIST.  */
  bool changed = false;
  while (current != worklist)
    {
      bb = *--current;
      changed |= remove_forwarder_block_with_phi (bb);
    }
  free (worklist);

  /* Removing forwarder blocks can cause formerly irreducible loops
     to become reducible if we merged two entry blocks.  */
  if (changed
      && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return 0;
}
127038fd1498Szrj 
127138fd1498Szrj } // anon namespace
127238fd1498Szrj 
127338fd1498Szrj gimple_opt_pass *
make_pass_merge_phi(gcc::context * ctxt)127438fd1498Szrj make_pass_merge_phi (gcc::context *ctxt)
127538fd1498Szrj {
127638fd1498Szrj   return new pass_merge_phi (ctxt);
127738fd1498Szrj }
127838fd1498Szrj 
127938fd1498Szrj /* Pass: cleanup the CFG just before expanding trees to RTL.
128038fd1498Szrj    This is just a round of label cleanups and case node grouping
128138fd1498Szrj    because after the tree optimizers have run such cleanups may
128238fd1498Szrj    be necessary.  */
128338fd1498Szrj 
/* Pass: cleanup the CFG just before expanding trees to RTL.
   This is just a round of label cleanups and case node grouping
   because after the tree optimizers have run such cleanups may
   be necessary.  Returns the TODO flags for the pass manager.  */

static unsigned int
execute_cleanup_cfg_post_optimizing (void)
{
  unsigned int todo = execute_fixup_cfg ();
  if (cleanup_tree_cfg ())
    {
      /* cleanup_tree_cfg already did the CFG cleanup; request an SSA
	 update instead.  */
      todo &= ~TODO_cleanup_cfg;
      todo |= TODO_update_ssa;
    }
  maybe_remove_unreachable_handlers ();
  cleanup_dead_labels ();
  if (group_case_labels ())
    todo |= TODO_cleanup_cfg;
  /* For -fcompare-debug, append a decl dump to the final-insns file.  */
  if ((flag_compare_debug_opt || flag_compare_debug)
      && flag_dump_final_insns)
    {
      FILE *final_output = fopen (flag_dump_final_insns, "a");

      if (!final_output)
	{
	  error ("could not open final insn dump file %qs: %m",
		 flag_dump_final_insns);
	  flag_dump_final_insns = NULL;
	}
      else
	{
	  /* Dump with addresses and numbers suppressed so the two
	     -fcompare-debug compilations produce comparable output;
	     restore the flags afterwards.  */
	  int save_unnumbered = flag_dump_unnumbered;
	  int save_noaddr = flag_dump_noaddr;

	  flag_dump_noaddr = flag_dump_unnumbered = 1;
	  fprintf (final_output, "\n");
	  dump_enumerated_decls (final_output,
				 dump_flags | TDF_SLIM | TDF_NOUID);
	  flag_dump_noaddr = save_noaddr;
	  flag_dump_unnumbered = save_unnumbered;
	  if (fclose (final_output))
	    {
	      error ("could not close final insn dump file %qs: %m",
		     flag_dump_final_insns);
	      flag_dump_final_insns = NULL;
	    }
	}
    }
  return todo;
}
132938fd1498Szrj 
133038fd1498Szrj namespace {
133138fd1498Szrj 
/* Pass descriptor for the final "optimized" CFG cleanup pass; only
   requires a CFG and removes unused locals when finished.  */

const pass_data pass_data_cleanup_cfg_post_optimizing =
{
  GIMPLE_PASS, /* type */
  "optimized", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CLEANUP_CFG, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_remove_unused_locals, /* todo_flags_finish */
};

/* GIMPLE pass wrapping execute_cleanup_cfg_post_optimizing.  */

class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
{
public:
  pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      return execute_cleanup_cfg_post_optimizing ();
    }

}; // class pass_cleanup_cfg_post_optimizing
135938fd1498Szrj 
136038fd1498Szrj } // anon namespace
136138fd1498Szrj 
136238fd1498Szrj gimple_opt_pass *
make_pass_cleanup_cfg_post_optimizing(gcc::context * ctxt)136338fd1498Szrj make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
136438fd1498Szrj {
136538fd1498Szrj   return new pass_cleanup_cfg_post_optimizing (ctxt);
136638fd1498Szrj }
136738fd1498Szrj 
136838fd1498Szrj 
1369