1 /* CFG cleanup for trees.
2    Copyright (C) 2001-2018 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10 
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 GNU General Public License for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "cfghooks.h"
28 #include "tree-pass.h"
29 #include "ssa.h"
30 #include "diagnostic-core.h"
31 #include "fold-const.h"
32 #include "cfganal.h"
33 #include "cfgcleanup.h"
34 #include "tree-eh.h"
35 #include "gimplify.h"
36 #include "gimple-iterator.h"
37 #include "tree-cfg.h"
38 #include "tree-ssa-loop-manip.h"
39 #include "tree-dfa.h"
40 #include "tree-ssa.h"
41 #include "cfgloop.h"
42 #include "tree-scalar-evolution.h"
43 #include "gimple-match.h"
44 #include "gimple-fold.h"
45 #include "tree-ssa-loop-niter.h"
46 #include "tree-into-ssa.h"
47 #include "tree-cfgcleanup.h"
48 
49 
50 /* The set of blocks in which at least one of the following changes happened:
51    -- the statement at the end of the block was changed
52    -- the block was newly created
53    -- the set of the predecessors of the block changed
54    -- the set of the successors of the block changed
55    ??? Maybe we could track these changes separately, since they determine
56        what cleanups it makes sense to try on the block.  */
57 bitmap cfgcleanup_altered_bbs;
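/* Note: the bitmap is populated by the routines below whenever they change
   a block's last statement, predecessors or successors, and it is drained
   by cleanup_tree_cfg_1, which keeps re-visiting altered blocks until no
   bit remains set.  */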
58 
59 /* Remove any fallthru edge from EV.  Return true if an edge was removed.  */
60 
61 static bool
62 remove_fallthru_edge (vec<edge, va_gc> *ev)
63 {
64   edge_iterator ei;
65   edge e;
66 
67   FOR_EACH_EDGE (e, ei, ev)
68     if ((e->flags & EDGE_FALLTHRU) != 0)
69       {
70 	if (e->flags & EDGE_COMPLEX)
71 	  e->flags &= ~EDGE_FALLTHRU;
72 	else
73 	  remove_edge_and_dominated_blocks (e);
74 	return true;
75       }
76   return false;
77 }
78 
79 /* Convert a SWTCH with a single non-default case to a gcond and replace it
80    at GSI.  */
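/* Illustrative sketch (hand-written GIMPLE, not from a real dump):

     switch (i_1) <default: <D.2>, case 5: <D.1>>

   becomes

     if (i_1 == 5) goto <D.1>; else goto <D.2>;

   A case with both CASE_LOW and CASE_HIGH is instead handled through
   generate_range_test and a LE_EXPR comparison.  */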
81 
82 static bool
83 convert_single_case_switch (gswitch *swtch, gimple_stmt_iterator &gsi)
84 {
85   if (gimple_switch_num_labels (swtch) != 2)
86     return false;
87 
88   tree index = gimple_switch_index (swtch);
89   tree default_label = CASE_LABEL (gimple_switch_default_label (swtch));
90   tree label = gimple_switch_label (swtch, 1);
91   tree low = CASE_LOW (label);
92   tree high = CASE_HIGH (label);
93 
94   basic_block default_bb = label_to_block_fn (cfun, default_label);
95   basic_block case_bb = label_to_block_fn (cfun, CASE_LABEL (label));
96 
97   basic_block bb = gimple_bb (swtch);
98   gcond *cond;
99 
100   /* Replace switch statement with condition statement.  */
101   if (high)
102     {
103       tree lhs, rhs;
104       generate_range_test (bb, index, low, high, &lhs, &rhs);
105       cond = gimple_build_cond (LE_EXPR, lhs, rhs, NULL_TREE, NULL_TREE);
106     }
107   else
108     cond = gimple_build_cond (EQ_EXPR, index,
109 			      fold_convert (TREE_TYPE (index), low),
110 			      NULL_TREE, NULL_TREE);
111 
112   gsi_replace (&gsi, cond, true);
113 
114   /* Update edges.  */
115   edge case_edge = find_edge (bb, case_bb);
116   edge default_edge = find_edge (bb, default_bb);
117 
118   case_edge->flags |= EDGE_TRUE_VALUE;
119   default_edge->flags |= EDGE_FALSE_VALUE;
120   return true;
121 }
122 
123 /* Disconnect an unreachable block in the control expression starting
124    at block BB.  */
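/* That is, if the GIMPLE_COND or GIMPLE_SWITCH ending BB folds to a
   constant, keep only the taken edge and let
   remove_edge_and_dominated_blocks dispose of the rest.  */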
125 
126 static bool
127 cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
128 {
129   edge taken_edge;
130   bool retval = false;
131   gimple *stmt = gsi_stmt (gsi);
132 
133   if (!single_succ_p (bb))
134     {
135       edge e;
136       edge_iterator ei;
137       bool warned;
138       tree val = NULL_TREE;
139 
140       /* Try to convert a switch with just a single non-default case to
141 	 GIMPLE condition.  */
142       if (gimple_code (stmt) == GIMPLE_SWITCH
143 	  && convert_single_case_switch (as_a<gswitch *> (stmt), gsi))
144 	stmt = gsi_stmt (gsi);
145 
146       fold_defer_overflow_warnings ();
147       switch (gimple_code (stmt))
148 	{
149 	case GIMPLE_COND:
150 	  {
151 	    code_helper rcode;
152 	    tree ops[3] = {};
153 	    if (gimple_simplify (stmt, &rcode, ops, NULL, no_follow_ssa_edges,
154 				 no_follow_ssa_edges)
155 		&& rcode == INTEGER_CST)
156 	      val = ops[0];
157 	  }
158 	  break;
159 
160 	case GIMPLE_SWITCH:
161 	  val = gimple_switch_index (as_a <gswitch *> (stmt));
162 	  break;
163 
164 	default:
165 	  ;
166 	}
167       taken_edge = find_taken_edge (bb, val);
168       if (!taken_edge)
169 	{
170 	  fold_undefer_and_ignore_overflow_warnings ();
171 	  return false;
172 	}
173 
174       /* Remove all the edges except the one that is always executed.  */
175       warned = false;
176       for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
177 	{
178 	  if (e != taken_edge)
179 	    {
180 	      if (!warned)
181 		{
182 		  fold_undefer_overflow_warnings
183 		    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
184 		  warned = true;
185 		}
186 
187 	      taken_edge->probability += e->probability;
188 	      remove_edge_and_dominated_blocks (e);
189 	      retval = true;
190 	    }
191 	  else
192 	    ei_next (&ei);
193 	}
194       if (!warned)
195 	fold_undefer_and_ignore_overflow_warnings ();
196     }
197   else
198     taken_edge = single_succ_edge (bb);
199 
200   bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
201   gsi_remove (&gsi, true);
202   taken_edge->flags = EDGE_FALLTHRU;
203 
204   return retval;
205 }
206 
207 /* Cleanup the GF_CALL_CTRL_ALTERING flag according to
208    the updated gimple_call_flags.  */
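/* Informally: once the updated flags show the call to be const or pure
   (and not looping-const-or-pure), or to be a leaf call, it is no longer
   considered able to alter control flow, so the bit set earlier is
   dropped again.  */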
209 
210 static void
211 cleanup_call_ctrl_altering_flag (gimple *bb_end)
212 {
213   if (!is_gimple_call (bb_end)
214       || !gimple_call_ctrl_altering_p (bb_end))
215     return;
216 
217   int flags = gimple_call_flags (bb_end);
218   if (((flags & (ECF_CONST | ECF_PURE))
219        && !(flags & ECF_LOOPING_CONST_OR_PURE))
220       || (flags & ECF_LEAF))
221     gimple_call_set_ctrl_altering (bb_end, false);
222 }
223 
224 /* Try to remove superfluous control structures in basic block BB.  Returns
225    true if anything changes.  */
226 
227 static bool
228 cleanup_control_flow_bb (basic_block bb)
229 {
230   gimple_stmt_iterator gsi;
231   bool retval = false;
232   gimple *stmt;
233 
234   /* If the last statement of the block could throw and now cannot,
235      we need to prune the cfg.  */
236   retval |= gimple_purge_dead_eh_edges (bb);
237 
238   gsi = gsi_last_nondebug_bb (bb);
239   if (gsi_end_p (gsi))
240     return retval;
241 
242   stmt = gsi_stmt (gsi);
243 
244   /* Try to clean up the ctrl-altering flag for the call which ends the bb.  */
245   cleanup_call_ctrl_altering_flag (stmt);
246 
247   if (gimple_code (stmt) == GIMPLE_COND
248       || gimple_code (stmt) == GIMPLE_SWITCH)
249     {
250       gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
251       retval |= cleanup_control_expr_graph (bb, gsi);
252     }
253   else if (gimple_code (stmt) == GIMPLE_GOTO
254 	   && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
255 	   && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
256 	       == LABEL_DECL))
257     {
258       /* If we had a computed goto which has a compile-time determinable
259 	 destination, then we can eliminate the goto.  */
260       edge e;
261       tree label;
262       edge_iterator ei;
263       basic_block target_block;
264 
265       gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
266       /* First look at all the outgoing edges.  Delete any outgoing
267 	 edges which do not go to the right block.  For the one
268 	 edge which goes to the right block, fix up its flags.  */
269       label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
270       if (DECL_CONTEXT (label) != cfun->decl)
271 	return retval;
272       target_block = label_to_block (label);
273       for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
274 	{
275 	  if (e->dest != target_block)
276 	    remove_edge_and_dominated_blocks (e);
277 	  else
278 	    {
279 	      /* Turn off the EDGE_ABNORMAL flag.  */
280 	      e->flags &= ~EDGE_ABNORMAL;
281 
282 	      /* And set EDGE_FALLTHRU.  */
283 	      e->flags |= EDGE_FALLTHRU;
284 	      ei_next (&ei);
285 	    }
286 	}
287 
288       bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
289       bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);
290 
291       /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
292 	 relevant information we need.  */
293       gsi_remove (&gsi, true);
294       retval = true;
295     }
296 
297   /* Check for indirect calls that have been turned into
298      noreturn calls.  */
299   else if (is_gimple_call (stmt)
300 	   && gimple_call_noreturn_p (stmt))
301     {
302       /* If there are debug stmts after the noreturn call, remove them
303 	 now; they should all be unreachable anyway.  */
304       for (gsi_next (&gsi); !gsi_end_p (gsi); )
305 	gsi_remove (&gsi, true);
306       if (remove_fallthru_edge (bb->succs))
307 	retval = true;
308     }
309 
310   return retval;
311 }
312 
313 /* Return true if basic block BB does nothing except pass control
314    flow to another block, and if we can safely insert a label at
315    the start of the successor block.
316 
317    As a precondition, we require that BB not be equal to
318    the entry block.  */
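/* Informally, a forwarder block contains at most labels and debug stmts
   and has a single, normal outgoing edge, so redirecting its predecessors
   straight to the successor loses nothing.  The checks below additionally
   make sure that loop structure (headers, preheaders, latches) is
   preserved.  */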
319 
320 static bool
321 tree_forwarder_block_p (basic_block bb, bool phi_wanted)
322 {
323   gimple_stmt_iterator gsi;
324   location_t locus;
325 
326   /* BB must have a single outgoing edge.  */
327   if (single_succ_p (bb) != 1
328       /* If PHI_WANTED is false, BB must not have any PHI nodes.
329 	 Otherwise, BB must have PHI nodes.  */
330       || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
331       /* BB may not be a predecessor of the exit block.  */
332       || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
333       /* Nor should this be an infinite loop.  */
334       || single_succ (bb) == bb
335       /* BB may not have an abnormal outgoing edge.  */
336       || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
337     return false;
338 
339   gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));
340 
341   locus = single_succ_edge (bb)->goto_locus;
342 
343   /* There should not be an edge coming from entry, or an EH edge.  */
344   {
345     edge_iterator ei;
346     edge e;
347 
348     FOR_EACH_EDGE (e, ei, bb->preds)
349       if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
350 	return false;
351       /* If goto_locus of any of the edges differs, prevent removing
352 	 the forwarder block for -O0.  */
353       else if (optimize == 0 && e->goto_locus != locus)
354 	return false;
355   }
356 
357   /* Now walk through the statements backward.  We can ignore labels,
358      anything else means this is not a forwarder block.  */
359   for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
360     {
361       gimple *stmt = gsi_stmt (gsi);
362 
363       switch (gimple_code (stmt))
364 	{
365 	case GIMPLE_LABEL:
366 	  if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt))))
367 	    return false;
368 	  if (optimize == 0 && gimple_location (stmt) != locus)
369 	    return false;
370 	  break;
371 
372 	  /* ??? For now, hope there's a corresponding debug
373 	     assignment at the destination.  */
374 	case GIMPLE_DEBUG:
375 	  break;
376 
377 	default:
378 	  return false;
379 	}
380     }
381 
382   if (current_loops)
383     {
384       basic_block dest;
385       /* Protect loop headers.  */
386       if (bb_loop_header_p (bb))
387 	return false;
388 
389       dest = EDGE_SUCC (bb, 0)->dest;
390       /* Protect loop preheaders and latches if requested.  */
391       if (dest->loop_father->header == dest)
392 	{
393 	  if (bb->loop_father == dest->loop_father)
394 	    {
395 	      if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
396 		return false;
397 	      /* If bb doesn't have a single predecessor we'd make this
398 		 loop have multiple latches.  Don't do that if that
399 		 would in turn require disambiguating them.  */
400 	      return (single_pred_p (bb)
401 		      || loops_state_satisfies_p
402 		      	   (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
403 	    }
404 	  else if (bb->loop_father == loop_outer (dest->loop_father))
405 	    return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
406 	  /* Always preserve other edges into loop headers that are
407 	     not simple latches or preheaders.  */
408 	  return false;
409 	}
410     }
411 
412   return true;
413 }
414 
415 /* If all the PHI nodes in DEST have alternatives for E1 and E2 and
416    those alternatives are equal in each of the PHI nodes, then return
417    true, else return false.  */
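/* For example (names made up), with

     x_1 = PHI <a_2 (E1), a_2 (E2), b_3 (E3)>

   the alternatives for E1 and E2 are equal, so redirecting one of the two
   edges to reuse the other's argument is safe.  */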
418 
419 static bool
420 phi_alternatives_equal (basic_block dest, edge e1, edge e2)
421 {
422   int n1 = e1->dest_idx;
423   int n2 = e2->dest_idx;
424   gphi_iterator gsi;
425 
426   for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
427     {
428       gphi *phi = gsi.phi ();
429       tree val1 = gimple_phi_arg_def (phi, n1);
430       tree val2 = gimple_phi_arg_def (phi, n2);
431 
432       gcc_assert (val1 != NULL_TREE);
433       gcc_assert (val2 != NULL_TREE);
434 
435       if (!operand_equal_for_phi_arg_p (val1, val2))
436 	return false;
437     }
438 
439   return true;
440 }
441 
442 /* Removes forwarder block BB.  Returns false if this failed.  */
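/* In outline: redirect all incoming edges of BB to its single successor,
   move labels (and, where possible, debug stmts) over, update dominators
   and the parent loop's latch, then delete BB.  */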
443 
444 static bool
445 remove_forwarder_block (basic_block bb)
446 {
447   edge succ = single_succ_edge (bb), e, s;
448   basic_block dest = succ->dest;
449   gimple *stmt;
450   edge_iterator ei;
451   gimple_stmt_iterator gsi, gsi_to;
452   bool can_move_debug_stmts;
453 
454   /* We check for infinite loops already in tree_forwarder_block_p.
455      However, it may happen that an infinite loop is created
456      afterwards due to the removal of forwarders.  */
457   if (dest == bb)
458     return false;
459 
460   /* If the destination block consists of a nonlocal label or is an
461      EH landing pad, do not merge it.  */
462   stmt = first_stmt (dest);
463   if (stmt)
464     if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
465       if (DECL_NONLOCAL (gimple_label_label (label_stmt))
466 	  || EH_LANDING_PAD_NR (gimple_label_label (label_stmt)) != 0)
467 	return false;
468 
469   /* If there is an abnormal edge to basic block BB, but not into
470      dest, problems might occur when removing the phi node at
471      out-of-SSA time due to overlapping live ranges of registers.
472 
473      If there is an abnormal edge in DEST, the problems would occur
474      anyway since cleanup_dead_labels would then merge the labels for
475      two different EH regions, and the rest of the exception handling
476      code does not like it.
477 
478      So if there is an abnormal edge to BB, proceed only if there is
479      no abnormal edge to DEST and there are no phi nodes in DEST.  */
480   if (bb_has_abnormal_pred (bb)
481       && (bb_has_abnormal_pred (dest)
482 	  || !gimple_seq_empty_p (phi_nodes (dest))))
483     return false;
484 
485   /* If there are phi nodes in DEST, and some of the blocks that are
486      predecessors of BB are also predecessors of DEST, check that the
487      phi node arguments match.  */
488   if (!gimple_seq_empty_p (phi_nodes (dest)))
489     {
490       FOR_EACH_EDGE (e, ei, bb->preds)
491 	{
492 	  s = find_edge (e->src, dest);
493 	  if (!s)
494 	    continue;
495 
496 	  if (!phi_alternatives_equal (dest, succ, s))
497 	    return false;
498 	}
499     }
500 
501   can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);
502 
503   basic_block pred = NULL;
504   if (single_pred_p (bb))
505     pred = single_pred (bb);
506 
507   /* Redirect the edges.  */
508   for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
509     {
510       bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);
511 
512       if (e->flags & EDGE_ABNORMAL)
513 	{
514 	  /* If there is an abnormal edge, redirect it anyway, and
515 	     move the labels to the new block to make it legal.  */
516 	  s = redirect_edge_succ_nodup (e, dest);
517 	}
518       else
519 	s = redirect_edge_and_branch (e, dest);
520 
521       if (s == e)
522 	{
523 	  /* Create arguments for the phi nodes, since the edge was not
524 	     here before.  */
525 	  for (gphi_iterator psi = gsi_start_phis (dest);
526 	       !gsi_end_p (psi);
527 	       gsi_next (&psi))
528 	    {
529 	      gphi *phi = psi.phi ();
530 	      source_location l = gimple_phi_arg_location_from_edge (phi, succ);
531 	      tree def = gimple_phi_arg_def (phi, succ->dest_idx);
532 	      add_phi_arg (phi, unshare_expr (def), s, l);
533 	    }
534 	}
535     }
536 
537   /* Move nonlocal labels and computed goto targets as well as user
538      defined labels and labels with an EH landing pad number to the
539      new block, so that the redirection of the abnormal edges works,
540      jump targets end up in a sane place and debug information for
541      labels is retained.  */
542   gsi_to = gsi_start_bb (dest);
543   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
544     {
545       stmt = gsi_stmt (gsi);
546       if (is_gimple_debug (stmt))
547 	break;
548 
549       /* Forwarder blocks can only contain labels and debug stmts, and
550 	 labels must come first, so if we get to this point, we know
551 	 we're looking at a label.  */
552       tree decl = gimple_label_label (as_a <glabel *> (stmt));
553       if (EH_LANDING_PAD_NR (decl) != 0
554 	  || DECL_NONLOCAL (decl)
555 	  || FORCED_LABEL (decl)
556 	  || !DECL_ARTIFICIAL (decl))
557 	gsi_move_before (&gsi, &gsi_to);
558       else
559 	gsi_next (&gsi);
560     }
561 
562   /* Move debug statements if the destination has a single predecessor.  */
563   if (can_move_debug_stmts && !gsi_end_p (gsi))
564     {
565       gsi_to = gsi_after_labels (dest);
566       do
567 	{
568 	  gimple *debug = gsi_stmt (gsi);
569 	  gcc_assert (is_gimple_debug (debug));
570 	  gsi_move_before (&gsi, &gsi_to);
571 	}
572       while (!gsi_end_p (gsi));
573     }
574 
575   bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);
576 
577   /* Update the dominators.  */
578   if (dom_info_available_p (CDI_DOMINATORS))
579     {
580       basic_block dom, dombb, domdest;
581 
582       dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
583       domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
584       if (domdest == bb)
585 	{
586 	  /* Shortcut to avoid calling (relatively expensive)
587 	     nearest_common_dominator unless necessary.  */
588 	  dom = dombb;
589 	}
590       else
591 	dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);
592 
593       set_immediate_dominator (CDI_DOMINATORS, dest, dom);
594     }
595 
596   /* Adjust the latch information of BB's parent loop, as otherwise
597      the cfg hook has a hard time not killing the loop.  */
598   if (current_loops && bb->loop_father->latch == bb)
599     bb->loop_father->latch = pred;
600 
601   /* And kill the forwarder block.  */
602   delete_basic_block (bb);
603 
604   return true;
605 }
606 
607 /* STMT is a call that has been discovered noreturn.  Split the
608    block to prepare fixing up the CFG and remove LHS.
609    Return true if cleanup-cfg needs to run.  */
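/* A sketch of the situation (hand-written GIMPLE): if  x_1 = foo ();  is
   discovered to never return, everything after it in the block is
   unreachable, so the block is split there (or trailing debug stmts are
   dropped) and the now meaningless LHS x_1 is detached from the call.  */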
610 
611 bool
612 fixup_noreturn_call (gimple *stmt)
613 {
614   basic_block bb = gimple_bb (stmt);
615   bool changed = false;
616 
617   if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
618     return false;
619 
620   /* First split basic block if stmt is not last.  */
621   if (stmt != gsi_stmt (gsi_last_bb (bb)))
622     {
623       if (stmt == gsi_stmt (gsi_last_nondebug_bb (bb)))
624 	{
625 	  /* Don't split if there are only debug stmts
626 	     after stmt, as that can result in -fcompare-debug
627 	     failures.  Remove the debug stmts instead;
628 	     they should all be unreachable anyway.  */
629 	  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
630 	  for (gsi_next (&gsi); !gsi_end_p (gsi); )
631 	    gsi_remove (&gsi, true);
632 	}
633       else
634 	{
635 	  split_block (bb, stmt);
636 	  changed = true;
637 	}
638     }
639 
640   /* If there is an LHS, remove it, but only if its type has fixed size.
641      The LHS will need to be recreated during RTL expansion and creating
642      temporaries of variable-sized types is not supported.  Also don't
643      do this with TREE_ADDRESSABLE types, as assign_temp will abort.
644      Drop LHS regardless of TREE_ADDRESSABLE, if the function call
645      has been changed into a call that does not return a value, like
646      __builtin_unreachable or __cxa_pure_virtual.  */
647   tree lhs = gimple_call_lhs (stmt);
648   if (lhs
649       && (should_remove_lhs_p (lhs)
650 	  || VOID_TYPE_P (TREE_TYPE (gimple_call_fntype (stmt)))))
651     {
652       gimple_call_set_lhs (stmt, NULL_TREE);
653 
654       /* We need to fix up the SSA name to avoid checking errors.  */
655       if (TREE_CODE (lhs) == SSA_NAME)
656 	{
657 	  tree new_var = create_tmp_reg (TREE_TYPE (lhs));
658 	  SET_SSA_NAME_VAR_OR_IDENTIFIER (lhs, new_var);
659 	  SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
660 	  set_ssa_default_def (cfun, new_var, lhs);
661 	}
662 
663       update_stmt (stmt);
664     }
665 
666   /* Mark the call as altering control flow.  */
667   if (!gimple_call_ctrl_altering_p (stmt))
668     {
669       gimple_call_set_ctrl_altering (stmt, true);
670       changed = true;
671     }
672 
673   return changed;
674 }
675 
676 /* Return true if we want to merge BB1 and BB2 into a single block.  */
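/* Besides can_merge_blocks_p this also looks at profile counts: when the
   last real statement of BB1 can terminate the block (e.g. a call that
   might throw or not return), BB2 may execute less often than BB1, and the
   merge is only done if the two counts are close enough to each other.  */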
677 
678 static bool
679 want_merge_blocks_p (basic_block bb1, basic_block bb2)
680 {
681   if (!can_merge_blocks_p (bb1, bb2))
682     return false;
683   gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb1);
684   if (gsi_end_p (gsi) || !stmt_can_terminate_bb_p (gsi_stmt (gsi)))
685     return true;
686   return bb1->count.ok_for_merging (bb2->count);
687 }
688 
689 
690 /* Tries to clean up the cfg at basic block BB.  Returns true if anything
691    changes.  */
692 
693 static bool
694 cleanup_tree_cfg_bb (basic_block bb)
695 {
696   if (tree_forwarder_block_p (bb, false)
697       && remove_forwarder_block (bb))
698     return true;
699 
700   /* If there is a merge opportunity with the predecessor
701      do nothing now but wait until we process the predecessor.
702      This happens when we visit BBs in a non-optimal order and
703      avoids quadratic behavior when adjusting the stmts' BB pointers.  */
704   if (single_pred_p (bb)
705       && want_merge_blocks_p (single_pred (bb), bb))
706     /* But make sure we _do_ visit it.  When we remove unreachable paths
707        ending in a backedge we fail to mark the destination's predecessors
708        as changed.  */
709     bitmap_set_bit (cfgcleanup_altered_bbs, single_pred (bb)->index);
710 
711   /* Merging the blocks may create new opportunities for folding
712      conditional branches (due to the elimination of single-valued PHI
713      nodes).  */
714   else if (single_succ_p (bb)
715 	   && want_merge_blocks_p (bb, single_succ (bb)))
716     {
717       merge_blocks (bb, single_succ (bb));
718       return true;
719     }
720 
721   return false;
722 }
723 
724 /* Do cleanup_control_flow_bb in PRE order.  */
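/* The walk below is an iterative DFS over the CFG using an explicit stack
   of edge iterators; every reachable block is cleaned exactly once, and
   before any of its not-yet-visited successors, which is what makes this a
   PRE-order traversal.  */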
725 
726 static bool
727 cleanup_control_flow_pre ()
728 {
729   bool retval = false;
730 
731   auto_vec<edge_iterator, 20> stack (n_basic_blocks_for_fn (cfun) + 1);
732   auto_sbitmap visited (last_basic_block_for_fn (cfun));
733   bitmap_clear (visited);
734 
735   stack.quick_push (ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs));
736 
737   while (! stack.is_empty ())
738     {
739       /* Look at the edge on the top of the stack.  */
740       edge_iterator ei = stack.last ();
741       basic_block dest = ei_edge (ei)->dest;
742 
743       if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
744 	  && ! bitmap_bit_p (visited, dest->index))
745 	{
746 	  bitmap_set_bit (visited, dest->index);
747 	  retval |= cleanup_control_flow_bb (dest);
748 	  if (EDGE_COUNT (dest->succs) > 0)
749 	    stack.quick_push (ei_start (dest->succs));
750 	}
751       else
752 	{
753 	  if (!ei_one_before_end_p (ei))
754 	    ei_next (&stack.last ());
755 	  else
756 	    stack.pop ();
757 	}
758     }
759 
760   return retval;
761 }
762 
763 /* Iterate the cfg cleanups, while anything changes.  */
764 
765 static bool
766 cleanup_tree_cfg_1 (unsigned ssa_update_flags)
767 {
768   bool retval = false;
769   basic_block bb;
770   unsigned i, n;
771 
772   /* Prepare the worklist of altered blocks.  */
773   cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);
774 
775   /* During forwarder block cleanup, we may redirect edges out of
776      SWITCH_EXPRs, which can get expensive.  So we want to enable
777      recording of the mapping from edges to case labels.  */
778   start_recording_case_labels ();
779 
780   /* We cannot use FOR_EACH_BB_FN for the BB iterations below
781      since the basic blocks may get removed.  */
782 
783   /* Start by iterating over all basic blocks in PRE order looking for
784      edge removal opportunities.  Do this first because incoming SSA form
785      may be invalid and we want to avoid performing SSA-related tasks such
786      as propagating out a PHI node during BB merging in that state.  */
787   retval |= cleanup_control_flow_pre ();
788 
789   /* After doing the above, SSA form should be valid (or an SSA update
790      should be required).  */
791   if (ssa_update_flags)
792     update_ssa (ssa_update_flags);
793 
794   /* Continue by iterating over all basic blocks looking for BB merging
795      opportunities.  */
796   n = last_basic_block_for_fn (cfun);
797   for (i = NUM_FIXED_BLOCKS; i < n; i++)
798     {
799       bb = BASIC_BLOCK_FOR_FN (cfun, i);
800       if (bb)
801 	retval |= cleanup_tree_cfg_bb (bb);
802     }
803 
804   /* Now process the altered blocks, as long as any are available.  */
805   while (!bitmap_empty_p (cfgcleanup_altered_bbs))
806     {
807       i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
808       bitmap_clear_bit (cfgcleanup_altered_bbs, i);
809       if (i < NUM_FIXED_BLOCKS)
810 	continue;
811 
812       bb = BASIC_BLOCK_FOR_FN (cfun, i);
813       if (!bb)
814 	continue;
815 
816       retval |= cleanup_control_flow_bb (bb);
817       retval |= cleanup_tree_cfg_bb (bb);
818     }
819 
820   end_recording_case_labels ();
821   BITMAP_FREE (cfgcleanup_altered_bbs);
822   return retval;
823 }
824 
825 static bool
826 mfb_keep_latches (edge e)
827 {
828   return ! dominated_by_p (CDI_DOMINATORS, e->src, e->dest);
829 }
830 
831 /* Remove unreachable blocks and other miscellaneous clean up work.
832    Return true if the flowgraph was modified, false otherwise.  */
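/* Overall flow: make sure dominators are available, give loop headers a
   single entry where needed, run the iterated cleanups of
   cleanup_tree_cfg_1, compact block numbers (unless SCEV is active) and
   finally flag the loop structures for fixup if anything changed.  */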
833 
834 static bool
835 cleanup_tree_cfg_noloop (unsigned ssa_update_flags)
836 {
837   bool changed;
838 
839   timevar_push (TV_TREE_CLEANUP_CFG);
840 
841   /* Iterate until there are no more cleanups left to do.  If any
842      iteration changed the flowgraph, set CHANGED to true.
843 
844      If dominance information is available, there cannot be any unreachable
845      blocks.  */
846   if (!dom_info_available_p (CDI_DOMINATORS))
847     {
848       changed = delete_unreachable_blocks ();
849       calculate_dominance_info (CDI_DOMINATORS);
850     }
851   else
852     {
853       checking_verify_dominators (CDI_DOMINATORS);
854       changed = false;
855     }
856 
857   /* Ensure that we have single entries into loop headers.  Otherwise
858      if one of the entries is becoming a latch due to CFG cleanup
859      (from formerly being part of an irreducible region) then we mess
860      up loop fixup and associate the old loop with a different region
861      which makes niter upper bounds invalid.  See for example PR80549.
862      This needs to be done before we remove trivially dead edges as
863      we need to capture the dominance state before the pending transform.  */
864   if (current_loops)
865     {
866       loop_p loop;
867       unsigned i;
868       FOR_EACH_VEC_ELT (*get_loops (cfun), i, loop)
869 	if (loop && loop->header)
870 	  {
871 	    basic_block bb = loop->header;
872 	    edge_iterator ei;
873 	    edge e;
874 	    bool found_latch = false;
875 	    bool any_abnormal = false;
876 	    unsigned n = 0;
877 	    /* We are only interested in preserving existing loops, but
878 	       we need to check whether they are still real and of course
879 	       if we need to add a preheader at all.  */
880 	    FOR_EACH_EDGE (e, ei, bb->preds)
881 	      {
882 		if (e->flags & EDGE_ABNORMAL)
883 		  {
884 		    any_abnormal = true;
885 		    break;
886 		  }
887 		if (dominated_by_p (CDI_DOMINATORS, e->src, bb))
888 		  {
889 		    found_latch = true;
890 		    continue;
891 		  }
892 		n++;
893 	      }
894 	    /* If we have more than one entry to the loop header
895 	       create a forwarder.  */
896 	    if (found_latch && ! any_abnormal && n > 1)
897 	      {
898 		edge fallthru = make_forwarder_block (bb, mfb_keep_latches,
899 						      NULL);
900 		loop->header = fallthru->dest;
901 		if (! loops_state_satisfies_p (LOOPS_NEED_FIXUP))
902 		  {
903 		    /* The loop updating from the CFG hook is incomplete
904 		       when we have multiple latches, fixup manually.  */
905 		    remove_bb_from_loops (fallthru->src);
906 		    loop_p cloop = loop;
907 		    FOR_EACH_EDGE (e, ei, fallthru->src->preds)
908 		      cloop = find_common_loop (cloop, e->src->loop_father);
909 		    add_bb_to_loop (fallthru->src, cloop);
910 		  }
911 	      }
912 	  }
913     }
914 
915   changed |= cleanup_tree_cfg_1 (ssa_update_flags);
916 
917   gcc_assert (dom_info_available_p (CDI_DOMINATORS));
918 
919   /* Do not renumber blocks if the SCEV cache is active; it is indexed by
920      basic-block numbers.  */
921   if (! scev_initialized_p ())
922     compact_blocks ();
923 
924   checking_verify_flow_info ();
925 
926   timevar_pop (TV_TREE_CLEANUP_CFG);
927 
928   if (changed && current_loops)
929     {
930       /* Removing edges and/or blocks may make recorded bounds refer
931          to stale GIMPLE stmts now, so clear them.  */
932       free_numbers_of_iterations_estimates (cfun);
933       loops_state_set (LOOPS_NEED_FIXUP);
934     }
935 
936   return changed;
937 }
938 
939 /* Repairs loop structures.  */
940 
941 static void
942 repair_loop_structures (void)
943 {
944   bitmap changed_bbs;
945   unsigned n_new_loops;
946 
947   calculate_dominance_info (CDI_DOMINATORS);
948 
949   timevar_push (TV_REPAIR_LOOPS);
950   changed_bbs = BITMAP_ALLOC (NULL);
951   n_new_loops = fix_loop_structure (changed_bbs);
952 
953   /* This usually does nothing.  But sometimes parts of the cfg that
954      originally were inside a loop end up outside of it due to edge
955      removal (since they become unreachable via back edges from the
956      latch).  Also a formerly irreducible loop can become reducible -
957      in this case force a full rewrite into loop-closed SSA form.  */
958   if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
959     rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
960 				  TODO_update_ssa);
961 
962   BITMAP_FREE (changed_bbs);
963 
964   checking_verify_loop_structure ();
965   scev_reset ();
966 
967   timevar_pop (TV_REPAIR_LOOPS);
968 }
969 
970 /* Cleanup cfg and repair loop structures.  */
971 
972 bool
973 cleanup_tree_cfg (unsigned ssa_update_flags)
974 {
975   bool changed = cleanup_tree_cfg_noloop (ssa_update_flags);
976 
977   if (current_loops != NULL
978       && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
979     repair_loop_structures ();
980 
981   return changed;
982 }
983 
984 /* Tries to merge the PHI nodes at BB into those at BB's sole successor.
985    Returns true if successful.  */
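/* Unlike remove_forwarder_block above, this variant handles forwarder
   blocks that themselves contain PHI nodes (tree_forwarder_block_p with
   PHI_WANTED set), folding their arguments into the PHIs at DEST.  */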
986 
987 static bool
988 remove_forwarder_block_with_phi (basic_block bb)
989 {
990   edge succ = single_succ_edge (bb);
991   basic_block dest = succ->dest;
992   gimple *label;
993   basic_block dombb, domdest, dom;
994 
995   /* We check for infinite loops already in tree_forwarder_block_p.
996      However, it may happen that an infinite loop is created
997      afterwards due to the removal of forwarders.  */
998   if (dest == bb)
999     return false;
1000 
1001   /* Removal of forwarders may expose new natural loops and thus
1002      a block may turn into a loop header.  */
1003   if (current_loops && bb_loop_header_p (bb))
1004     return false;
1005 
1006   /* If the destination block consists of a nonlocal label, do not
1007      merge it.  */
1008   label = first_stmt (dest);
1009   if (label)
1010     if (glabel *label_stmt = dyn_cast <glabel *> (label))
1011       if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
1012 	return false;
1013 
1014   /* Record BB's single pred in case we need to update the father
1015      loop's latch information later.  */
1016   basic_block pred = NULL;
1017   if (single_pred_p (bb))
1018     pred = single_pred (bb);
1019 
1020   /* Redirect each incoming edge to BB to DEST.  */
1021   while (EDGE_COUNT (bb->preds) > 0)
1022     {
1023       edge e = EDGE_PRED (bb, 0), s;
1024       gphi_iterator gsi;
1025 
1026       s = find_edge (e->src, dest);
1027       if (s)
1028 	{
1029 	  /* We already have an edge S from E->src to DEST.  If S and
1030 	     E->dest's sole successor edge have the same PHI arguments
1031 	     at DEST, redirect S to DEST.  */
1032 	  if (phi_alternatives_equal (dest, s, succ))
1033 	    {
1034 	      e = redirect_edge_and_branch (e, dest);
1035 	      redirect_edge_var_map_clear (e);
1036 	      continue;
1037 	    }
1038 
1039 	  /* PHI arguments are different.  Create a forwarder block by
1040 	     splitting E so that we can merge PHI arguments on E to
1041 	     DEST.  */
1042 	  e = single_succ_edge (split_edge (e));
1043 	}
1044       else
1045 	{
1046 	  /* If we merge the forwarder into a loop header, check whether we
1047 	     are creating another loop latch edge.  If so, reset the
1048 	     number-of-iterations information of the loop.  */
1049 	  if (dest->loop_father->header == dest
1050 	      && dominated_by_p (CDI_DOMINATORS, e->src, dest))
1051 	    {
1052 	      dest->loop_father->any_upper_bound = false;
1053 	      dest->loop_father->any_likely_upper_bound = false;
1054 	      free_numbers_of_iterations_estimates (dest->loop_father);
1055 	    }
1056 	}
1057 
1058       s = redirect_edge_and_branch (e, dest);
1059 
1060       /* redirect_edge_and_branch must not create a new edge.  */
1061       gcc_assert (s == e);
1062 
1063       /* Add to the PHI nodes at DEST each PHI argument removed at the
1064 	 destination of E.  */
1065       for (gsi = gsi_start_phis (dest);
1066 	   !gsi_end_p (gsi);
1067 	   gsi_next (&gsi))
1068 	{
1069 	  gphi *phi = gsi.phi ();
1070 	  tree def = gimple_phi_arg_def (phi, succ->dest_idx);
1071 	  source_location locus = gimple_phi_arg_location_from_edge (phi, succ);
1072 
1073 	  if (TREE_CODE (def) == SSA_NAME)
1074 	    {
1075 	      /* If DEF is one of the results of PHI nodes removed during
1076 		 redirection, replace it with the PHI argument that used
1077 		 to be on E.  */
1078 	      vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
1079 	      size_t length = head ? head->length () : 0;
1080 	      for (size_t i = 0; i < length; i++)
1081 		{
1082 		  edge_var_map *vm = &(*head)[i];
1083 		  tree old_arg = redirect_edge_var_map_result (vm);
1084 		  tree new_arg = redirect_edge_var_map_def (vm);
1085 
1086 		  if (def == old_arg)
1087 		    {
1088 		      def = new_arg;
1089 		      locus = redirect_edge_var_map_location (vm);
1090 		      break;
1091 		    }
1092 		}
1093 	    }
1094 
1095 	  add_phi_arg (phi, def, s, locus);
1096 	}
1097 
1098       redirect_edge_var_map_clear (e);
1099     }
1100 
1101   /* Update the dominators.  */
1102   dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
1103   domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
1104   if (domdest == bb)
1105     {
1106       /* Shortcut to avoid calling (relatively expensive)
1107 	 nearest_common_dominator unless necessary.  */
1108       dom = dombb;
1109     }
1110   else
1111     dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);
1112 
1113   set_immediate_dominator (CDI_DOMINATORS, dest, dom);
1114 
1115   /* Adjust the latch information of BB's parent loop, as otherwise
1116      the cfg hook has a hard time not killing the loop.  */
1117   if (current_loops && bb->loop_father->latch == bb)
1118     bb->loop_father->latch = pred;
1119 
1120   /* Remove BB since all of BB's incoming edges have been redirected
1121      to DEST.  */
1122   delete_basic_block (bb);
1123 
1124   return true;
1125 }
1126 
1127 /* This pass merges PHI nodes if one feeds into another.  For example,
1128    suppose we have the following:
1129 
1130   goto <bb 9> (<L9>);
1131 
1132 <L8>:;
1133   tem_17 = foo ();
1134 
1135   # tem_6 = PHI <tem_17(8), tem_23(7)>;
1136 <L9>:;
1137 
1138   # tem_3 = PHI <tem_6(9), tem_2(5)>;
1139 <L10>:;
1140 
1141   Then we merge the first PHI node into the second one like so:
1142 
1143   goto <bb 9> (<L10>);
1144 
1145 <L8>:;
1146   tem_17 = foo ();
1147 
1148   # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
1149 <L10>:;
1150 */
1151 
1152 namespace {
1153 
1154 const pass_data pass_data_merge_phi =
1155 {
1156   GIMPLE_PASS, /* type */
1157   "mergephi", /* name */
1158   OPTGROUP_NONE, /* optinfo_flags */
1159   TV_TREE_MERGE_PHI, /* tv_id */
1160   ( PROP_cfg | PROP_ssa ), /* properties_required */
1161   0, /* properties_provided */
1162   0, /* properties_destroyed */
1163   0, /* todo_flags_start */
1164   0, /* todo_flags_finish */
1165 };
1166 
1167 class pass_merge_phi : public gimple_opt_pass
1168 {
1169 public:
1170   pass_merge_phi (gcc::context *ctxt)
1171     : gimple_opt_pass (pass_data_merge_phi, ctxt)
1172   {}
1173 
1174   /* opt_pass methods: */
1175   opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
1176   virtual unsigned int execute (function *);
1177 
1178 }; // class pass_merge_phi
1179 
1180 unsigned int
1181 pass_merge_phi::execute (function *fun)
1182 {
1183   basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
1184   basic_block *current = worklist;
1185   basic_block bb;
1186 
1187   calculate_dominance_info (CDI_DOMINATORS);
1188 
1189   /* Find all PHI nodes that we may be able to merge.  */
1190   FOR_EACH_BB_FN (bb, fun)
1191     {
1192       basic_block dest;
1193 
1194       /* Look for a forwarder block with PHI nodes.  */
1195       if (!tree_forwarder_block_p (bb, true))
1196 	continue;
1197 
1198       dest = single_succ (bb);
1199 
1200       /* We have to feed into another basic block with PHI
1201 	 nodes.  */
1202       if (gimple_seq_empty_p (phi_nodes (dest))
1203 	  /* We don't want to deal with a basic block with
1204 	     abnormal edges.  */
1205 	  || bb_has_abnormal_pred (bb))
1206 	continue;
1207 
1208       if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
1209 	{
1210 	  /* If BB does not dominate DEST, then the PHI nodes at
1211 	     DEST must be the only users of the results of the PHI
1212 	     nodes at BB.  */
1213 	  *current++ = bb;
1214 	}
1215       else
1216 	{
1217 	  gphi_iterator gsi;
1218 	  unsigned int dest_idx = single_succ_edge (bb)->dest_idx;
1219 
1220 	  /* BB dominates DEST.  There may be many users of the PHI
1221 	     nodes in BB.  However, there is still a trivial case we
1222 	     can handle.  If the result of every PHI in BB is used
1223 	     only by a PHI in DEST, then we can trivially merge the
1224 	     PHI nodes from BB into DEST.  */
1225 	  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
1226 	       gsi_next (&gsi))
1227 	    {
1228 	      gphi *phi = gsi.phi ();
1229 	      tree result = gimple_phi_result (phi);
1230 	      use_operand_p imm_use;
1231 	      gimple *use_stmt;
1232 
1233 	      /* If the PHI's result is never used, then we can just
1234 		 ignore it.  */
1235 	      if (has_zero_uses (result))
1236 		continue;
1237 
1238 	      /* Get the single use of the result of this PHI node.  */
1239   	      if (!single_imm_use (result, &imm_use, &use_stmt)
1240 		  || gimple_code (use_stmt) != GIMPLE_PHI
1241 		  || gimple_bb (use_stmt) != dest
1242 		  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
1243 		break;
1244 	    }
1245 
1246 	  /* If the loop above iterated through all the PHI nodes
1247 	     in BB, then we can merge the PHIs from BB into DEST.  */
1248 	  if (gsi_end_p (gsi))
1249 	    *current++ = bb;
1250 	}
1251     }
1252 
1253   /* Now let's drain WORKLIST.  */
1254   bool changed = false;
1255   while (current != worklist)
1256     {
1257       bb = *--current;
1258       changed |= remove_forwarder_block_with_phi (bb);
1259     }
1260   free (worklist);
1261 
1262   /* Removing forwarder blocks can cause formerly irreducible loops
1263      to become reducible if we merged two entry blocks.  */
1264   if (changed
1265       && current_loops)
1266     loops_state_set (LOOPS_NEED_FIXUP);
1267 
1268   return 0;
1269 }
1270 
1271 } // anon namespace
1272 
1273 gimple_opt_pass *
1274 make_pass_merge_phi (gcc::context *ctxt)
1275 {
1276   return new pass_merge_phi (ctxt);
1277 }
1278 
1279 /* Pass: cleanup the CFG just before expanding trees to RTL.
1280    This is just a round of label cleanups and case node grouping
1281    because after the tree optimizers have run such cleanups may
1282    be necessary.  */
1283 
1284 static unsigned int
1285 execute_cleanup_cfg_post_optimizing (void)
1286 {
1287   unsigned int todo = execute_fixup_cfg ();
1288   if (cleanup_tree_cfg ())
1289     {
1290       todo &= ~TODO_cleanup_cfg;
1291       todo |= TODO_update_ssa;
1292     }
1293   maybe_remove_unreachable_handlers ();
1294   cleanup_dead_labels ();
1295   if (group_case_labels ())
1296     todo |= TODO_cleanup_cfg;
1297   if ((flag_compare_debug_opt || flag_compare_debug)
1298       && flag_dump_final_insns)
1299     {
1300       FILE *final_output = fopen (flag_dump_final_insns, "a");
1301 
1302       if (!final_output)
1303 	{
1304 	  error ("could not open final insn dump file %qs: %m",
1305 		 flag_dump_final_insns);
1306 	  flag_dump_final_insns = NULL;
1307 	}
1308       else
1309 	{
1310 	  int save_unnumbered = flag_dump_unnumbered;
1311 	  int save_noaddr = flag_dump_noaddr;
1312 
1313 	  flag_dump_noaddr = flag_dump_unnumbered = 1;
1314 	  fprintf (final_output, "\n");
1315 	  dump_enumerated_decls (final_output,
1316 				 dump_flags | TDF_SLIM | TDF_NOUID);
1317 	  flag_dump_noaddr = save_noaddr;
1318 	  flag_dump_unnumbered = save_unnumbered;
1319 	  if (fclose (final_output))
1320 	    {
1321 	      error ("could not close final insn dump file %qs: %m",
1322 		     flag_dump_final_insns);
1323 	      flag_dump_final_insns = NULL;
1324 	    }
1325 	}
1326     }
1327   return todo;
1328 }
1329 
1330 namespace {
1331 
1332 const pass_data pass_data_cleanup_cfg_post_optimizing =
1333 {
1334   GIMPLE_PASS, /* type */
1335   "optimized", /* name */
1336   OPTGROUP_NONE, /* optinfo_flags */
1337   TV_TREE_CLEANUP_CFG, /* tv_id */
1338   PROP_cfg, /* properties_required */
1339   0, /* properties_provided */
1340   0, /* properties_destroyed */
1341   0, /* todo_flags_start */
1342   TODO_remove_unused_locals, /* todo_flags_finish */
1343 };
1344 
1345 class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
1346 {
1347 public:
1348   pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
1349     : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
1350   {}
1351 
1352   /* opt_pass methods: */
1353   virtual unsigned int execute (function *)
1354     {
1355       return execute_cleanup_cfg_post_optimizing ();
1356     }
1357 
1358 }; // class pass_cleanup_cfg_post_optimizing
1359 
1360 } // anon namespace
1361 
1362 gimple_opt_pass *
1363 make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
1364 {
1365   return new pass_cleanup_cfg_post_optimizing (ctxt);
1366 }
1367 
1368 
1369