1 /* CFG cleanup for trees.
2    Copyright (C) 2001-2019 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10 
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 GNU General Public License for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "cfghooks.h"
28 #include "tree-pass.h"
29 #include "ssa.h"
30 #include "diagnostic-core.h"
31 #include "fold-const.h"
32 #include "cfganal.h"
33 #include "cfgcleanup.h"
34 #include "tree-eh.h"
35 #include "gimplify.h"
36 #include "gimple-iterator.h"
37 #include "tree-cfg.h"
38 #include "tree-ssa-loop-manip.h"
39 #include "tree-dfa.h"
40 #include "tree-ssa.h"
41 #include "cfgloop.h"
42 #include "tree-scalar-evolution.h"
43 #include "gimple-match.h"
44 #include "gimple-fold.h"
45 #include "tree-ssa-loop-niter.h"
46 #include "cgraph.h"
47 #include "tree-into-ssa.h"
48 #include "tree-cfgcleanup.h"
49 
50 
51 /* The set of blocks in which at least one of the following changes happened:
52    -- the statement at the end of the block was changed
53    -- the block was newly created
54    -- the set of the predecessors of the block changed
55    -- the set of the successors of the block changed
56    ??? Maybe we could track these changes separately, since they determine
57        what cleanups it makes sense to try on the block.  */
58 bitmap cfgcleanup_altered_bbs;
59 
60 /* Remove any fallthru edge from EV.  Return true if an edge was removed.  */
61 
62 static bool
63 remove_fallthru_edge (vec<edge, va_gc> *ev)
64 {
65   edge_iterator ei;
66   edge e;
67 
68   FOR_EACH_EDGE (e, ei, ev)
69     if ((e->flags & EDGE_FALLTHRU) != 0)
70       {
71 	if (e->flags & EDGE_COMPLEX)
72 	  e->flags &= ~EDGE_FALLTHRU;
73 	else
74 	  remove_edge_and_dominated_blocks (e);
75 	return true;
76       }
77   return false;
78 }
79 
80 /* Convert SWTCH, a switch with a single non-default case, into a gcond
81    and replace it at GSI.  */
82 
83 static bool
84 convert_single_case_switch (gswitch *swtch, gimple_stmt_iterator &gsi)
85 {
86   if (gimple_switch_num_labels (swtch) != 2)
87     return false;
88 
89   tree index = gimple_switch_index (swtch);
90   tree label = gimple_switch_label (swtch, 1);
91   tree low = CASE_LOW (label);
92   tree high = CASE_HIGH (label);
93 
94   basic_block default_bb = gimple_switch_default_bb (cfun, swtch);
95   basic_block case_bb = label_to_block (cfun, CASE_LABEL (label));
96 
97   basic_block bb = gimple_bb (swtch);
98   gcond *cond;
99 
100   /* Replace switch statement with condition statement.  */
101   if (high)
102     {
103       tree lhs, rhs;
104       if (range_check_type (TREE_TYPE (index)) == NULL_TREE)
105 	return false;
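      /* generate_range_test should canonicalize the LOW <= INDEX <= HIGH
	 check into a single unsigned comparison LHS <= RHS, so a plain
	 LE_EXPR condition is enough below.  */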
106       generate_range_test (bb, index, low, high, &lhs, &rhs);
107       cond = gimple_build_cond (LE_EXPR, lhs, rhs, NULL_TREE, NULL_TREE);
108     }
109   else
110     cond = gimple_build_cond (EQ_EXPR, index,
111 			      fold_convert (TREE_TYPE (index), low),
112 			      NULL_TREE, NULL_TREE);
113 
114   gsi_replace (&gsi, cond, true);
115 
116   /* Update edges.  */
117   edge case_edge = find_edge (bb, case_bb);
118   edge default_edge = find_edge (bb, default_bb);
119 
120   case_edge->flags |= EDGE_TRUE_VALUE;
121   default_edge->flags |= EDGE_FALSE_VALUE;
122   return true;
123 }
124 
125 /* Disconnect an unreachable block in the control expression starting
126    at block BB.  */
127 
128 static bool
129 cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
130 {
131   edge taken_edge;
132   bool retval = false;
133   gimple *stmt = gsi_stmt (gsi);
134 
135   if (!single_succ_p (bb))
136     {
137       edge e;
138       edge_iterator ei;
139       bool warned;
140       tree val = NULL_TREE;
141 
142       /* Try to convert a switch with just a single non-default case to
143 	 a GIMPLE condition.  */
144       if (gimple_code (stmt) == GIMPLE_SWITCH
145 	  && convert_single_case_switch (as_a<gswitch *> (stmt), gsi))
146 	stmt = gsi_stmt (gsi);
147 
148       fold_defer_overflow_warnings ();
149       switch (gimple_code (stmt))
150 	{
151 	case GIMPLE_COND:
152 	  {
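	    /* Fold the condition without following SSA edges to defining
	       statements (no_follow_ssa_edges); VAL is only set when the
	       condition folds to a constant on its own.  */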
153 	    gimple_match_op res_op;
154 	    if (gimple_simplify (stmt, &res_op, NULL, no_follow_ssa_edges,
155 				 no_follow_ssa_edges)
156 		&& res_op.code == INTEGER_CST)
157 	      val = res_op.ops[0];
158 	  }
159 	  break;
160 
161 	case GIMPLE_SWITCH:
162 	  val = gimple_switch_index (as_a <gswitch *> (stmt));
163 	  break;
164 
165 	default:
166 	  ;
167 	}
168       taken_edge = find_taken_edge (bb, val);
169       if (!taken_edge)
170 	{
171 	  fold_undefer_and_ignore_overflow_warnings ();
172 	  return false;
173 	}
174 
175       /* Remove all the edges except the one that is always executed.  */
176       warned = false;
177       for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
178 	{
179 	  if (e != taken_edge)
180 	    {
181 	      if (!warned)
182 		{
183 		  fold_undefer_overflow_warnings
184 		    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
185 		  warned = true;
186 		}
187 
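	      /* Transfer the probability of the edge we are about to remove
		 to the remaining edge, so the total outgoing probability of
		 BB is preserved.  */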
188 	      taken_edge->probability += e->probability;
189 	      remove_edge_and_dominated_blocks (e);
190 	      retval = true;
191 	    }
192 	  else
193 	    ei_next (&ei);
194 	}
195       if (!warned)
196 	fold_undefer_and_ignore_overflow_warnings ();
197     }
198   else
199     taken_edge = single_succ_edge (bb);
200 
201   bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
202   gsi_remove (&gsi, true);
203   taken_edge->flags = EDGE_FALLTHRU;
204 
205   return retval;
206 }
207 
208 /* Clean up the GF_CALL_CTRL_ALTERING flag according to
209    the updated gimple_call_flags.  */
210 
211 static void
212 cleanup_call_ctrl_altering_flag (gimple *bb_end)
213 {
214   if (!is_gimple_call (bb_end)
215       || !gimple_call_ctrl_altering_p (bb_end))
216     return;
217 
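  /* If the call has since been proven const or pure (and not looping) or
     leaf, it presumably can no longer alter control flow in this function
     (e.g. it is not noreturn and does not transfer control abnormally),
     so the previously set flag is stale.  */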
218   int flags = gimple_call_flags (bb_end);
219   if (((flags & (ECF_CONST | ECF_PURE))
220        && !(flags & ECF_LOOPING_CONST_OR_PURE))
221       || (flags & ECF_LEAF))
222     gimple_call_set_ctrl_altering (bb_end, false);
223 }
224 
225 /* Try to remove superfluous control structures in basic block BB.  Returns
226    true if anything changes.  */
227 
228 static bool
229 cleanup_control_flow_bb (basic_block bb)
230 {
231   gimple_stmt_iterator gsi;
232   bool retval = false;
233   gimple *stmt;
234 
235   /* If the last statement of the block could throw and now cannot,
236      we need to prune cfg.  */
237   retval |= gimple_purge_dead_eh_edges (bb);
238 
239   gsi = gsi_last_nondebug_bb (bb);
240   if (gsi_end_p (gsi))
241     return retval;
242 
243   stmt = gsi_stmt (gsi);
244 
245   /* Try to clean up the ctrl-altering flag for the call which ends BB.  */
246   cleanup_call_ctrl_altering_flag (stmt);
247 
248   if (gimple_code (stmt) == GIMPLE_COND
249       || gimple_code (stmt) == GIMPLE_SWITCH)
250     {
251       gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
252       retval |= cleanup_control_expr_graph (bb, gsi);
253     }
254   else if (gimple_code (stmt) == GIMPLE_GOTO
255 	   && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
256 	   && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
257 	       == LABEL_DECL))
258     {
259       /* If we had a computed goto which has a compile-time determinable
260 	 destination, then we can eliminate the goto.  */
261       edge e;
262       tree label;
263       edge_iterator ei;
264       basic_block target_block;
265 
266       gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
267       /* First look at all the outgoing edges.  Delete any outgoing
268 	 edges which do not go to the right block.  For the one
269 	 edge which goes to the right block, fix up its flags.  */
270       label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
271       if (DECL_CONTEXT (label) != cfun->decl)
272 	return retval;
273       target_block = label_to_block (cfun, label);
274       for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
275 	{
276 	  if (e->dest != target_block)
277 	    remove_edge_and_dominated_blocks (e);
278 	  else
279 	    {
280 	      /* Turn off the EDGE_ABNORMAL flag.  */
281 	      e->flags &= ~EDGE_ABNORMAL;
282 
283 	      /* And set EDGE_FALLTHRU.  */
284 	      e->flags |= EDGE_FALLTHRU;
285 	      ei_next (&ei);
286 	    }
287 	}
288 
289       bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
290       bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);
291 
292       /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
293 	 relevant information we need.  */
294       gsi_remove (&gsi, true);
295       retval = true;
296     }
297 
298   /* Check for indirect calls that have been turned into
299      noreturn calls.  */
300   else if (is_gimple_call (stmt)
301 	   && gimple_call_noreturn_p (stmt))
302     {
303       /* If there are debug stmts after the noreturn call, remove them
304 	 now, they should be all unreachable anyway.  */
305       for (gsi_next (&gsi); !gsi_end_p (gsi); )
306 	gsi_remove (&gsi, true);
307       if (remove_fallthru_edge (bb->succs))
308 	retval = true;
309     }
310 
311   return retval;
312 }
313 
314 /* Return true if basic block BB does nothing except pass control
315    flow to another block and that we can safely insert a label at
316    the start of the successor block.
317 
318    As a precondition, we require that BB be not equal to
319    the entry block.  */
320 
321 static bool
322 tree_forwarder_block_p (basic_block bb, bool phi_wanted)
323 {
324   gimple_stmt_iterator gsi;
325   location_t locus;
326 
327   /* BB must have a single outgoing edge.  */
328   if (single_succ_p (bb) != 1
329       /* If PHI_WANTED is false, BB must not have any PHI nodes.
330 	 Otherwise, BB must have PHI nodes.  */
331       || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
332       /* BB may not be a predecessor of the exit block.  */
333       || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
334       /* Nor should this be an infinite loop.  */
335       || single_succ (bb) == bb
336       /* BB may not have an abnormal outgoing edge.  */
337       || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
338     return false;
339 
340   gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));
341 
342   locus = single_succ_edge (bb)->goto_locus;
343 
344   /* There should not be an edge coming from entry, or an EH edge.  */
345   {
346     edge_iterator ei;
347     edge e;
348 
349     FOR_EACH_EDGE (e, ei, bb->preds)
350       if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
351 	return false;
352       /* If goto_locus of any of the edges differs, prevent removing
353 	 the forwarder block when not optimizing.  */
354       else if (!optimize
355 	       && (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
356 		   || LOCATION_LOCUS (locus) != UNKNOWN_LOCATION)
357 	       && e->goto_locus != locus)
358 	return false;
359   }
360 
361   /* Now walk through the statements backward.  We can ignore labels;
362      anything else means this is not a forwarder block.  */
363   for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
364     {
365       gimple *stmt = gsi_stmt (gsi);
366 
367       switch (gimple_code (stmt))
368 	{
369 	case GIMPLE_LABEL:
370 	  if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt))))
371 	    return false;
372 	  if (!optimize
373 	      && (gimple_has_location (stmt)
374 		  || LOCATION_LOCUS (locus) != UNKNOWN_LOCATION)
375 	      && gimple_location (stmt) != locus)
376 	    return false;
377 	  break;
378 
379 	  /* ??? For now, hope there's a corresponding debug
380 	     assignment at the destination.  */
381 	case GIMPLE_DEBUG:
382 	  break;
383 
384 	default:
385 	  return false;
386 	}
387     }
388 
389   if (current_loops)
390     {
391       basic_block dest;
392       /* Protect loop headers.  */
393       if (bb_loop_header_p (bb))
394 	return false;
395 
396       dest = EDGE_SUCC (bb, 0)->dest;
397       /* Protect loop preheaders and latches if requested.  */
398       if (dest->loop_father->header == dest)
399 	{
400 	  if (bb->loop_father == dest->loop_father)
401 	    {
402 	      if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
403 		return false;
404 	      /* If bb doesn't have a single predecessor we'd make this
405 		 loop have multiple latches.  Don't do that if that
406 		 would in turn require disambiguating them.  */
407 	      return (single_pred_p (bb)
408 		      || loops_state_satisfies_p
409 		      	   (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
410 	    }
411 	  else if (bb->loop_father == loop_outer (dest->loop_father))
412 	    return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
413 	  /* Always preserve other edges into loop headers that are
414 	     not simple latches or preheaders.  */
415 	  return false;
416 	}
417     }
418 
419   return true;
420 }
421 
422 /* If all the PHI nodes in DEST have alternatives for E1 and E2 and
423    those alternatives are equal in each of the PHI nodes, then return
424    true, else return false.  */
425 
426 static bool
427 phi_alternatives_equal (basic_block dest, edge e1, edge e2)
428 {
429   int n1 = e1->dest_idx;
430   int n2 = e2->dest_idx;
431   gphi_iterator gsi;
432 
433   for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
434     {
435       gphi *phi = gsi.phi ();
436       tree val1 = gimple_phi_arg_def (phi, n1);
437       tree val2 = gimple_phi_arg_def (phi, n2);
438 
439       gcc_assert (val1 != NULL_TREE);
440       gcc_assert (val2 != NULL_TREE);
441 
442       if (!operand_equal_for_phi_arg_p (val1, val2))
443 	return false;
444     }
445 
446   return true;
447 }
448 
449 /* Move debug stmts from the forwarder block SRC to DEST.  */
450 
451 static void
452 move_debug_stmts_from_forwarder (basic_block src, basic_block dest,
453 				 bool dest_single_pred_p)
454 {
455   if (!MAY_HAVE_DEBUG_STMTS)
456     return;
457 
458   gimple_stmt_iterator gsi_to = gsi_after_labels (dest);
459   for (gimple_stmt_iterator gsi = gsi_after_labels (src); !gsi_end_p (gsi);)
460     {
461       gimple *debug = gsi_stmt (gsi);
462       gcc_assert (is_gimple_debug (debug));
463       /* Move debug binds anyway, but not anything else like begin-stmt
464 	 markers unless they are always valid at the destination.  */
465       if (dest_single_pred_p
466 	  || gimple_debug_bind_p (debug))
467 	{
468 	  gsi_move_before (&gsi, &gsi_to);
469 	  /* Reset debug-binds that are not always valid at the destination.
470 	     Simply dropping them can cause earlier values to become live,
471 	     generating wrong debug information.
472 	     ???  There are several things we could improve here.  For
473 	     one we might be able to move stmts to the predecessor.
474 	     For another, if the debug stmt is immediately followed by a
475 	     (debug) definition in the destination (on a post-dominated path?)
476 	     we can elide it without any bad effects.  */
477 	  if (!dest_single_pred_p)
478 	    {
479 	      gimple_debug_bind_reset_value (debug);
480 	      update_stmt (debug);
481 	    }
482 	}
483       else
484 	gsi_next (&gsi);
485     }
486 }
487 
488 /* Removes forwarder block BB.  Returns false if this failed.  */
489 
490 static bool
491 remove_forwarder_block (basic_block bb)
492 {
493   edge succ = single_succ_edge (bb), e, s;
494   basic_block dest = succ->dest;
495   gimple *stmt;
496   edge_iterator ei;
497   gimple_stmt_iterator gsi, gsi_to;
498 
499   /* We check for infinite loops already in tree_forwarder_block_p.
500      However it may happen that the infinite loop is created
501      afterwards due to removal of forwarders.  */
502   if (dest == bb)
503     return false;
504 
505   /* If the destination block consists of a nonlocal label or is an
506      EH landing pad, do not merge it.  */
507   stmt = first_stmt (dest);
508   if (stmt)
509     if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
510       if (DECL_NONLOCAL (gimple_label_label (label_stmt))
511 	  || EH_LANDING_PAD_NR (gimple_label_label (label_stmt)) != 0)
512 	return false;
513 
514   /* If there is an abnormal edge to basic block BB, but not into
515      dest, problems might occur during removal of the phi node at the
516      out-of-SSA stage due to overlapping live ranges of registers.
517 
518      If there is an abnormal edge in DEST, the problems would occur
519      anyway since cleanup_dead_labels would then merge the labels for
520      two different eh regions, and rest of exception handling code
521      does not like it.
522 
523      So if there is an abnormal edge to BB, proceed only if there is
524      no abnormal edge to DEST and there are no phi nodes in DEST.  */
525   if (bb_has_abnormal_pred (bb)
526       && (bb_has_abnormal_pred (dest)
527 	  || !gimple_seq_empty_p (phi_nodes (dest))))
528     return false;
529 
530   /* If there are phi nodes in DEST, and some of the blocks that are
531      predecessors of BB are also predecessors of DEST, check that the
532      phi node arguments match.  */
533   if (!gimple_seq_empty_p (phi_nodes (dest)))
534     {
535       FOR_EACH_EDGE (e, ei, bb->preds)
536 	{
537 	  s = find_edge (e->src, dest);
538 	  if (!s)
539 	    continue;
540 
541 	  if (!phi_alternatives_equal (dest, succ, s))
542 	    return false;
543 	}
544     }
545 
546   basic_block pred = NULL;
547   if (single_pred_p (bb))
548     pred = single_pred (bb);
549   bool dest_single_pred_p = single_pred_p (dest);
550 
551   /* Redirect the edges.  */
552   for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
553     {
554       bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);
555 
556       if (e->flags & EDGE_ABNORMAL)
557 	{
558 	  /* If there is an abnormal edge, redirect it anyway, and
559 	     move the labels to the new block to make it legal.  */
560 	  s = redirect_edge_succ_nodup (e, dest);
561 	}
562       else
563 	s = redirect_edge_and_branch (e, dest);
564 
565       if (s == e)
566 	{
567 	  /* Create arguments for the phi nodes, since the edge was not
568 	     here before.  */
569 	  for (gphi_iterator psi = gsi_start_phis (dest);
570 	       !gsi_end_p (psi);
571 	       gsi_next (&psi))
572 	    {
573 	      gphi *phi = psi.phi ();
574 	      location_t l = gimple_phi_arg_location_from_edge (phi, succ);
575 	      tree def = gimple_phi_arg_def (phi, succ->dest_idx);
576 	      add_phi_arg (phi, unshare_expr (def), s, l);
577 	    }
578 	}
579     }
580 
581   /* Move nonlocal labels and computed goto targets as well as user
582      defined labels and labels with an EH landing pad number to the
583      new block, so that the redirection of the abnormal edges works,
584      jump targets end up in a sane place and debug information for
585      labels is retained.  */
586   gsi_to = gsi_start_bb (dest);
587   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
588     {
589       stmt = gsi_stmt (gsi);
590       if (is_gimple_debug (stmt))
591 	break;
592 
593       /* Forwarder blocks can only contain labels and debug stmts, and
594 	 labels must come first, so if we get to this point, we know
595 	 we're looking at a label.  */
596       tree decl = gimple_label_label (as_a <glabel *> (stmt));
597       if (EH_LANDING_PAD_NR (decl) != 0
598 	  || DECL_NONLOCAL (decl)
599 	  || FORCED_LABEL (decl)
600 	  || !DECL_ARTIFICIAL (decl))
601 	gsi_move_before (&gsi, &gsi_to);
602       else
603 	gsi_next (&gsi);
604     }
605 
606   /* Move debug statements.  Reset them if the destination does not
607      have a single predecessor.  */
608   move_debug_stmts_from_forwarder (bb, dest, dest_single_pred_p);
609 
610   bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);
611 
612   /* Update the dominators.  */
613   if (dom_info_available_p (CDI_DOMINATORS))
614     {
615       basic_block dom, dombb, domdest;
616 
617       dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
618       domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
619       if (domdest == bb)
620 	{
621 	  /* Shortcut to avoid calling (relatively expensive)
622 	     nearest_common_dominator unless necessary.  */
623 	  dom = dombb;
624 	}
625       else
626 	dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);
627 
628       set_immediate_dominator (CDI_DOMINATORS, dest, dom);
629     }
630 
631   /* Adjust latch information of BB's parent loop as otherwise
632      the cfg hook has a hard time not killing the loop.  */
633   if (current_loops && bb->loop_father->latch == bb)
634     bb->loop_father->latch = pred;
635 
636   /* And kill the forwarder block.  */
637   delete_basic_block (bb);
638 
639   return true;
640 }
641 
642 /* STMT is a call that has been discovered noreturn.  Split the
643    block to prepare fixing up the CFG and remove LHS.
644    Return true if cleanup-cfg needs to run.  */
645 
646 bool
647 fixup_noreturn_call (gimple *stmt)
648 {
649   basic_block bb = gimple_bb (stmt);
650   bool changed = false;
651 
652   if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
653     return false;
654 
655   /* First split basic block if stmt is not last.  */
656   if (stmt != gsi_stmt (gsi_last_bb (bb)))
657     {
658       if (stmt == gsi_stmt (gsi_last_nondebug_bb (bb)))
659 	{
660 	  /* Don't split if there are only debug stmts
661 	     after stmt, that can result in -fcompare-debug
662 	     failures.  Remove the debug stmts instead,
663 	     they should be all unreachable anyway.  */
664 	  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
665 	  for (gsi_next (&gsi); !gsi_end_p (gsi); )
666 	    gsi_remove (&gsi, true);
667 	}
668       else
669 	{
670 	  split_block (bb, stmt);
671 	  changed = true;
672 	}
673     }
674 
675   /* If there is an LHS, remove it, but only if its type has fixed size.
676      The LHS will need to be recreated during RTL expansion and creating
677      temporaries of variable-sized types is not supported.  Also don't
678      do this with TREE_ADDRESSABLE types, as assign_temp will abort.
679      Drop LHS regardless of TREE_ADDRESSABLE, if the function call
680      has been changed into a call that does not return a value, like
681      __builtin_unreachable or __cxa_pure_virtual.  */
682   tree lhs = gimple_call_lhs (stmt);
683   if (lhs
684       && (should_remove_lhs_p (lhs)
685 	  || VOID_TYPE_P (TREE_TYPE (gimple_call_fntype (stmt)))))
686     {
687       gimple_call_set_lhs (stmt, NULL_TREE);
688 
689       /* We need to fix up the SSA name to avoid checking errors: LHS no
	 longer has a defining statement, so turn it into the default
	 definition of a fresh variable.  */
690       if (TREE_CODE (lhs) == SSA_NAME)
691 	{
692 	  tree new_var = create_tmp_reg (TREE_TYPE (lhs));
693 	  SET_SSA_NAME_VAR_OR_IDENTIFIER (lhs, new_var);
694 	  SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
695 	  set_ssa_default_def (cfun, new_var, lhs);
696 	}
697 
698       update_stmt (stmt);
699     }
700 
701   /* Mark the call as altering control flow.  */
702   if (!gimple_call_ctrl_altering_p (stmt))
703     {
704       gimple_call_set_ctrl_altering (stmt, true);
705       changed = true;
706     }
707 
708   return changed;
709 }
710 
711 /* Return true if we want to merge BB1 and BB2 into a single block.  */
712 
713 static bool
714 want_merge_blocks_p (basic_block bb1, basic_block bb2)
715 {
716   if (!can_merge_blocks_p (bb1, bb2))
717     return false;
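  /* If BB1 does not end in a statement that can terminate the block,
     merging cannot lose profile precision; otherwise merge only when the
     counts of BB1 and BB2 are consistent with each other.  */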
718   gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb1);
719   if (gsi_end_p (gsi) || !stmt_can_terminate_bb_p (gsi_stmt (gsi)))
720     return true;
721   return bb1->count.ok_for_merging (bb2->count);
722 }
723 
724 
725 /* Tries to cleanup cfg in basic block BB by merging blocks.  Returns
726    true if anything changes.  */
727 
728 static bool
729 cleanup_tree_cfg_bb (basic_block bb)
730 {
731   if (tree_forwarder_block_p (bb, false)
732       && remove_forwarder_block (bb))
733     return true;
734 
735   /* If there is a merge opportunity with the predecessor
736      do nothing now but wait until we process the predecessor.
737      This happens when we visit BBs in a non-optimal order and
738      avoids quadratic behavior with adjusting stmts BB pointer.  */
739   if (single_pred_p (bb)
740       && want_merge_blocks_p (single_pred (bb), bb))
741     /* But make sure we _do_ visit it.  When we remove unreachable paths
742        ending in a backedge we fail to mark the destinations predecessors
743        as changed.  */
744     bitmap_set_bit (cfgcleanup_altered_bbs, single_pred (bb)->index);
745 
746   /* Merging the blocks may create new opportunities for folding
747      conditional branches (due to the elimination of single-valued PHI
748      nodes).  */
749   else if (single_succ_p (bb)
750 	   && want_merge_blocks_p (bb, single_succ (bb)))
751     {
752       merge_blocks (bb, single_succ (bb));
753       return true;
754     }
755 
756   return false;
757 }
758 
759 /* Return true if E is an EDGE_ABNORMAL edge for returns_twice calls,
760    i.e. one going from .ABNORMAL_DISPATCHER to a basic block which doesn't
761    start with a forced or nonlocal label.  Calls which return twice can return
762    the second time only if they are called normally the first time, so basic
763    blocks which can be only entered through these abnormal edges but not
764    normally are effectively unreachable as well.  Additionally ignore
765    __builtin_setjmp_receiver starting blocks, which have one FORCED_LABEL
766    and which are always only reachable through an EDGE_ABNORMAL edge.  They are
767    handled in cleanup_control_flow_pre.  */
768 
769 static bool
770 maybe_dead_abnormal_edge_p (edge e)
771 {
772   if ((e->flags & (EDGE_ABNORMAL | EDGE_EH)) != EDGE_ABNORMAL)
773     return false;
774 
775   gimple_stmt_iterator gsi = gsi_start_nondebug_after_labels_bb (e->src);
776   gimple *g = gsi_stmt (gsi);
777   if (!g || !gimple_call_internal_p (g, IFN_ABNORMAL_DISPATCHER))
778     return false;
779 
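  /* Scan the labels at the start of E->dest: a nonlocal label keeps the
     edge live, while a single FORCED_LABEL is remembered as the potential
     __builtin_setjmp_receiver target checked below.  */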
780   tree target = NULL_TREE;
781   for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
782     if (glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi)))
783       {
784 	tree this_target = gimple_label_label (label_stmt);
785 	if (DECL_NONLOCAL (this_target))
786 	  return false;
787 	if (FORCED_LABEL (this_target))
788 	  {
789 	    if (target)
790 	      return false;
791 	    target = this_target;
792 	  }
793       }
794     else
795       break;
796 
797   if (target)
798     {
799       /* If there was a single FORCED_LABEL, check for
800 	 __builtin_setjmp_receiver with address of that label.  */
801       if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
802 	gsi_next_nondebug (&gsi);
803       if (gsi_end_p (gsi))
804 	return false;
805       if (!gimple_call_builtin_p (gsi_stmt (gsi), BUILT_IN_SETJMP_RECEIVER))
806 	return false;
807 
808       tree arg = gimple_call_arg (gsi_stmt (gsi), 0);
809       if (TREE_CODE (arg) != ADDR_EXPR || TREE_OPERAND (arg, 0) != target)
810 	return false;
811     }
812   return true;
813 }
814 
815 /* If BB is a basic block ending with __builtin_setjmp_setup, return edge
816    from .ABNORMAL_DISPATCHER basic block to corresponding
817    __builtin_setjmp_receiver basic block, otherwise return NULL.  */
818 static edge
819 builtin_setjmp_setup_bb (basic_block bb)
820 {
821   if (EDGE_COUNT (bb->succs) != 2
822       || ((EDGE_SUCC (bb, 0)->flags
823 	   & (EDGE_ABNORMAL | EDGE_EH)) != EDGE_ABNORMAL
824 	  && (EDGE_SUCC (bb, 1)->flags
825 	      & (EDGE_ABNORMAL | EDGE_EH)) != EDGE_ABNORMAL))
826     return NULL;
827 
828   gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
829   if (gsi_end_p (gsi)
830       || !gimple_call_builtin_p (gsi_stmt (gsi), BUILT_IN_SETJMP_SETUP))
831     return NULL;
832 
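  /* The second argument of __builtin_setjmp_setup is the address of the
     receiver label; look up the basic block started by that label.  */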
833   tree arg = gimple_call_arg (gsi_stmt (gsi), 1);
834   if (TREE_CODE (arg) != ADDR_EXPR
835       || TREE_CODE (TREE_OPERAND (arg, 0)) != LABEL_DECL)
836     return NULL;
837 
838   basic_block recv_bb = label_to_block (cfun, TREE_OPERAND (arg, 0));
839   if (EDGE_COUNT (recv_bb->preds) != 1
840       || (EDGE_PRED (recv_bb, 0)->flags
841 	  & (EDGE_ABNORMAL | EDGE_EH)) != EDGE_ABNORMAL
842       || (EDGE_SUCC (bb, 0)->dest != EDGE_PRED (recv_bb, 0)->src
843 	  && EDGE_SUCC (bb, 1)->dest != EDGE_PRED (recv_bb, 0)->src))
844     return NULL;
845 
846   /* EDGE_PRED (recv_bb, 0)->src should be the .ABNORMAL_DISPATCHER bb.  */
847   return EDGE_PRED (recv_bb, 0);
848 }
849 
850 /* Do cleanup_control_flow_bb in PRE order.  */
851 
852 static bool
853 cleanup_control_flow_pre ()
854 {
855   bool retval = false;
856 
857   /* We want remove_edge_and_dominated_blocks to only remove edges, not
858      dominated blocks, which is what it does when dom info isn't available.
859      Pretend that is the case.  */
860   dom_state saved_state = dom_info_state (CDI_DOMINATORS);
861   set_dom_info_availability (CDI_DOMINATORS, DOM_NONE);
862 
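  /* The stack of edge iterators implements an iterative preorder DFS walk
     over the CFG, seeded with the successors of the entry block.  */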
863   auto_vec<edge_iterator, 20> stack (n_basic_blocks_for_fn (cfun) + 2);
864   auto_sbitmap visited (last_basic_block_for_fn (cfun));
865   bitmap_clear (visited);
866 
867   vec<edge, va_gc> *setjmp_vec = NULL;
868   auto_vec<basic_block, 4> abnormal_dispatchers;
869 
870   stack.quick_push (ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs));
871 
872   while (! stack.is_empty ())
873     {
874       /* Look at the edge on the top of the stack.  */
875       edge_iterator ei = stack.last ();
876       basic_block dest = ei_edge (ei)->dest;
877 
878       if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
879 	  && !bitmap_bit_p (visited, dest->index)
880 	  && (ei_container (ei) == setjmp_vec
881 	      || !maybe_dead_abnormal_edge_p (ei_edge (ei))))
882 	{
883 	  bitmap_set_bit (visited, dest->index);
884 	  /* We only possibly remove edges from DEST here, leaving
885 	     possibly unreachable code in the IL.  */
886 	  retval |= cleanup_control_flow_bb (dest);
887 
888 	  /* Check for __builtin_setjmp_setup.  Edges from .ABNORMAL_DISPATCHER
889 	     to __builtin_setjmp_receiver will be normally ignored by
890 	     maybe_dead_abnormal_edge_p.  If DEST is a visited
891 	     __builtin_setjmp_setup, queue the edge from .ABNORMAL_DISPATCHER
892 	     to __builtin_setjmp_receiver, so that it will be visited too.  */
893 	  if (edge e = builtin_setjmp_setup_bb (dest))
894 	    {
895 	      vec_safe_push (setjmp_vec, e);
896 	      if (vec_safe_length (setjmp_vec) == 1)
897 		stack.quick_push (ei_start (setjmp_vec));
898 	    }
899 
900 	  if ((ei_edge (ei)->flags
901 	       & (EDGE_ABNORMAL | EDGE_EH)) == EDGE_ABNORMAL)
902 	    {
903 	      gimple_stmt_iterator gsi
904 		= gsi_start_nondebug_after_labels_bb (dest);
905 	      gimple *g = gsi_stmt (gsi);
906 	      if (g && gimple_call_internal_p (g, IFN_ABNORMAL_DISPATCHER))
907 		abnormal_dispatchers.safe_push (dest);
908 	    }
909 
910 	  if (EDGE_COUNT (dest->succs) > 0)
911 	    stack.quick_push (ei_start (dest->succs));
912 	}
913       else
914 	{
915 	  if (!ei_one_before_end_p (ei))
916 	    ei_next (&stack.last ());
917 	  else
918 	    {
919 	      if (ei_container (ei) == setjmp_vec)
920 		vec_safe_truncate (setjmp_vec, 0);
921 	      stack.pop ();
922 	    }
923 	}
924     }
925 
926   vec_free (setjmp_vec);
927 
928   /* If we've marked .ABNORMAL_DISPATCHER basic block(s) as visited
929      above, but haven't marked any of their successors as visited,
930      unmark them now, so that they can be removed as useless.  */
931   basic_block dispatcher_bb;
932   unsigned int k;
933   FOR_EACH_VEC_ELT (abnormal_dispatchers, k, dispatcher_bb)
934     {
935       edge e;
936       edge_iterator ei;
937       FOR_EACH_EDGE (e, ei, dispatcher_bb->succs)
938 	if (bitmap_bit_p (visited, e->dest->index))
939 	  break;
940       if (e == NULL)
941 	bitmap_clear_bit (visited, dispatcher_bb->index);
942     }
943 
944   set_dom_info_availability (CDI_DOMINATORS, saved_state);
945 
946   /* We are deleting BBs in non-reverse dominator order, make sure
947      insert_debug_temps_for_defs is prepared for that.  */
948   if (retval)
949     free_dominance_info (CDI_DOMINATORS);
950 
951   /* Remove all now (and previously) unreachable blocks.  */
952   for (int i = NUM_FIXED_BLOCKS; i < last_basic_block_for_fn (cfun); ++i)
953     {
954       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
955       if (bb && !bitmap_bit_p (visited, bb->index))
956 	{
957 	  if (!retval)
958 	    free_dominance_info (CDI_DOMINATORS);
959 	  delete_basic_block (bb);
960 	  retval = true;
961 	}
962     }
963 
964   return retval;
965 }
966 
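/* Edge predicate for make_forwarder_block: return true if E does not look
   like a latch edge, i.e. its source is neither dominated by its
   destination nor is the edge marked as a DFS back edge.  Used below when
   splitting loop headers that have more than one entry.  */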
967 static bool
968 mfb_keep_latches (edge e)
969 {
970   return !((dom_info_available_p (CDI_DOMINATORS)
971 	    && dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
972 	   || (e->flags & EDGE_DFS_BACK));
973 }
974 
975 /* Remove unreachable blocks and other miscellaneous clean up work.
976    Return true if the flowgraph was modified, false otherwise.  */
977 
978 static bool
979 cleanup_tree_cfg_noloop (unsigned ssa_update_flags)
980 {
981   timevar_push (TV_TREE_CLEANUP_CFG);
982 
983   /* Ensure that we have single entries into loop headers.  Otherwise
984      if one of the entries is becoming a latch due to CFG cleanup
985      (from formerly being part of an irreducible region) then we mess
986      up loop fixup and associate the old loop with a different region
987      which makes niter upper bounds invalid.  See for example PR80549.
988      This needs to be done before we remove trivially dead edges as
989      we need to capture the dominance state before the pending transform.  */
990   if (current_loops)
991     {
992       /* This needs backedges or dominators.  */
993       if (!dom_info_available_p (CDI_DOMINATORS))
994 	mark_dfs_back_edges ();
995 
996       loop_p loop;
997       unsigned i;
998       FOR_EACH_VEC_ELT (*get_loops (cfun), i, loop)
999 	if (loop && loop->header)
1000 	  {
1001 	    basic_block bb = loop->header;
1002 	    edge_iterator ei;
1003 	    edge e;
1004 	    bool found_latch = false;
1005 	    bool any_abnormal = false;
1006 	    unsigned n = 0;
1007 	    /* We are only interested in preserving existing loops, but
1008 	       we need to check whether they are still real and of course
1009 	       if we need to add a preheader at all.  */
1010 	    FOR_EACH_EDGE (e, ei, bb->preds)
1011 	      {
1012 		if (e->flags & EDGE_ABNORMAL)
1013 		  {
1014 		    any_abnormal = true;
1015 		    break;
1016 		  }
1017 		if ((dom_info_available_p (CDI_DOMINATORS)
1018 		     && dominated_by_p (CDI_DOMINATORS, e->src, bb))
1019 		    || (e->flags & EDGE_DFS_BACK))
1020 		  {
1021 		    found_latch = true;
1022 		    continue;
1023 		  }
1024 		n++;
1025 	      }
1026 	    /* If we have more than one entry to the loop header
1027 	       create a forwarder.  */
1028 	    if (found_latch && ! any_abnormal && n > 1)
1029 	      {
1030 		edge fallthru = make_forwarder_block (bb, mfb_keep_latches,
1031 						      NULL);
1032 		loop->header = fallthru->dest;
1033 		if (! loops_state_satisfies_p (LOOPS_NEED_FIXUP))
1034 		  {
1035 		    /* The loop updating from the CFG hook is incomplete
1036 		       when we have multiple latches, fixup manually.  */
1037 		    remove_bb_from_loops (fallthru->src);
1038 		    loop_p cloop = loop;
1039 		    FOR_EACH_EDGE (e, ei, fallthru->src->preds)
1040 		      cloop = find_common_loop (cloop, e->src->loop_father);
1041 		    add_bb_to_loop (fallthru->src, cloop);
1042 		  }
1043 	      }
1044 	  }
1045     }
1046 
1047   /* Prepare the worklists of altered blocks.  */
1048   cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);
1049 
1050   /* Start by iterating over all basic blocks in PRE order looking for
1051      edge removal opportunities.  Do this first because incoming SSA form
1052      may be invalid and we want to avoid performing SSA related tasks such
1053      as propagating out a PHI node during BB merging in that state.  This
1054      also gets rid of unreachable blocks.  */
1055   bool changed = cleanup_control_flow_pre ();
1056 
1057   /* After doing the above SSA form should be valid (or an update SSA
1058      should be required).  */
1059   if (ssa_update_flags)
1060     update_ssa (ssa_update_flags);
1061 
1062   /* Compute dominator info which we need for the iterative process below.  */
1063   if (!dom_info_available_p (CDI_DOMINATORS))
1064     calculate_dominance_info (CDI_DOMINATORS);
1065   else
1066     checking_verify_dominators (CDI_DOMINATORS);
1067 
1068   /* During forwarder block cleanup, we may redirect edges out of
1069      SWITCH_EXPRs, which can get expensive.  So we want to enable
1070      recording of edge to CASE_LABEL_EXPR.  */
1071   start_recording_case_labels ();
1072 
1073   /* Continue by iterating over all basic blocks looking for BB merging
1074      opportunities.  We cannot use FOR_EACH_BB_FN for the BB iteration
1075      since the basic blocks may get removed.  */
1076   unsigned n = last_basic_block_for_fn (cfun);
1077   for (unsigned i = NUM_FIXED_BLOCKS; i < n; i++)
1078     {
1079       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
1080       if (bb)
1081 	changed |= cleanup_tree_cfg_bb (bb);
1082     }
1083 
1084   /* Now process the altered blocks, as long as any are available.  */
1085   while (!bitmap_empty_p (cfgcleanup_altered_bbs))
1086     {
1087       unsigned i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
1088       bitmap_clear_bit (cfgcleanup_altered_bbs, i);
1089       if (i < NUM_FIXED_BLOCKS)
1090 	continue;
1091 
1092       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
1093       if (!bb)
1094 	continue;
1095 
1096       /* BB merging done by cleanup_tree_cfg_bb can end up propagating
1097 	 out single-argument PHIs which in turn can expose
1098 	 cleanup_control_flow_bb opportunities so we have to repeat
1099 	 that here.  */
1100       changed |= cleanup_control_flow_bb (bb);
1101       changed |= cleanup_tree_cfg_bb (bb);
1102     }
1103 
1104   end_recording_case_labels ();
1105   BITMAP_FREE (cfgcleanup_altered_bbs);
1106 
1107   gcc_assert (dom_info_available_p (CDI_DOMINATORS));
1108 
1109   /* Do not renumber blocks if the SCEV cache is active, it is indexed by
1110      basic-block numbers.  */
1111   if (! scev_initialized_p ())
1112     compact_blocks ();
1113 
1114   checking_verify_flow_info ();
1115 
1116   timevar_pop (TV_TREE_CLEANUP_CFG);
1117 
1118   if (changed && current_loops)
1119     {
1120       /* Removing edges and/or blocks may make recorded bounds refer
1121          to stale GIMPLE stmts now, so clear them.  */
1122       free_numbers_of_iterations_estimates (cfun);
1123       loops_state_set (LOOPS_NEED_FIXUP);
1124     }
1125 
1126   return changed;
1127 }
1128 
1129 /* Repairs loop structures.  */
1130 
1131 static void
1132 repair_loop_structures (void)
1133 {
1134   bitmap changed_bbs;
1135   unsigned n_new_loops;
1136 
1137   calculate_dominance_info (CDI_DOMINATORS);
1138 
1139   timevar_push (TV_REPAIR_LOOPS);
1140   changed_bbs = BITMAP_ALLOC (NULL);
1141   n_new_loops = fix_loop_structure (changed_bbs);
1142 
1143   /* This usually does nothing.  But sometimes parts of cfg that originally
1144      were inside a loop get out of it due to edge removal (since they
1145      become unreachable by back edges from latch).  Also a former
1146      irreducible loop can become reducible - in this case force a full
1147      rewrite into loop-closed SSA form.  */
1148   if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
1149     rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
1150 				  TODO_update_ssa);
1151 
1152   BITMAP_FREE (changed_bbs);
1153 
1154   checking_verify_loop_structure ();
1155   scev_reset ();
1156 
1157   timevar_pop (TV_REPAIR_LOOPS);
1158 }
1159 
1160 /* Cleanup cfg and repair loop structures.  */
1161 
1162 bool
1163 cleanup_tree_cfg (unsigned ssa_update_flags)
1164 {
1165   bool changed = cleanup_tree_cfg_noloop (ssa_update_flags);
1166 
1167   if (current_loops != NULL
1168       && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
1169     repair_loop_structures ();
1170 
1171   return changed;
1172 }
1173 
1174 /* Tries to merge the PHI nodes at BB into those at BB's sole successor.
1175    Returns true if successful.  */
1176 
1177 static bool
1178 remove_forwarder_block_with_phi (basic_block bb)
1179 {
1180   edge succ = single_succ_edge (bb);
1181   basic_block dest = succ->dest;
1182   gimple *label;
1183   basic_block dombb, domdest, dom;
1184 
1185   /* We check for infinite loops already in tree_forwarder_block_p.
1186      However it may happen that the infinite loop is created
1187      afterwards due to removal of forwarders.  */
1188   if (dest == bb)
1189     return false;
1190 
1191   /* Removal of forwarders may expose new natural loops and thus
1192      a block may turn into a loop header.  */
1193   if (current_loops && bb_loop_header_p (bb))
1194     return false;
1195 
1196   /* If the destination block consists of a nonlocal label, do not
1197      merge it.  */
1198   label = first_stmt (dest);
1199   if (label)
1200     if (glabel *label_stmt = dyn_cast <glabel *> (label))
1201       if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
1202 	return false;
1203 
1204   /* Record BB's single pred in case we need to update the father
1205      loop's latch information later.  */
1206   basic_block pred = NULL;
1207   if (single_pred_p (bb))
1208     pred = single_pred (bb);
1209   bool dest_single_pred_p = single_pred_p (dest);
1210 
1211   /* Redirect each incoming edge to BB to DEST.  */
1212   while (EDGE_COUNT (bb->preds) > 0)
1213     {
1214       edge e = EDGE_PRED (bb, 0), s;
1215       gphi_iterator gsi;
1216 
1217       s = find_edge (e->src, dest);
1218       if (s)
1219 	{
1220 	  /* We already have an edge S from E->src to DEST.  If S and
1221 	     E->dest's sole successor edge have the same PHI arguments
1222 	     at DEST, redirect S to DEST.  */
1223 	  if (phi_alternatives_equal (dest, s, succ))
1224 	    {
1225 	      e = redirect_edge_and_branch (e, dest);
1226 	      redirect_edge_var_map_clear (e);
1227 	      continue;
1228 	    }
1229 
1230 	  /* PHI arguments are different.  Create a forwarder block by
1231 	     splitting E so that we can merge PHI arguments on E to
1232 	     DEST.  */
1233 	  e = single_succ_edge (split_edge (e));
1234 	}
1235       else
1236 	{
1237 	  /* If we merge the forwarder into a loop header verify if we
1238 	     are creating another loop latch edge.  If so, reset
1239 	     number of iteration information of the loop.  */
1240 	  if (dest->loop_father->header == dest
1241 	      && dominated_by_p (CDI_DOMINATORS, e->src, dest))
1242 	    {
1243 	      dest->loop_father->any_upper_bound = false;
1244 	      dest->loop_father->any_likely_upper_bound = false;
1245 	      free_numbers_of_iterations_estimates (dest->loop_father);
1246 	    }
1247 	}
1248 
1249       s = redirect_edge_and_branch (e, dest);
1250 
1251       /* redirect_edge_and_branch must not create a new edge.  */
1252       gcc_assert (s == e);
1253 
1254       /* Add to the PHI nodes at DEST each PHI argument removed at the
1255 	 destination of E.  */
1256       for (gsi = gsi_start_phis (dest);
1257 	   !gsi_end_p (gsi);
1258 	   gsi_next (&gsi))
1259 	{
1260 	  gphi *phi = gsi.phi ();
1261 	  tree def = gimple_phi_arg_def (phi, succ->dest_idx);
1262 	  location_t locus = gimple_phi_arg_location_from_edge (phi, succ);
1263 
1264 	  if (TREE_CODE (def) == SSA_NAME)
1265 	    {
1266 	      /* If DEF is one of the results of PHI nodes removed during
1267 		 redirection, replace it with the PHI argument that used
1268 		 to be on E.  */
1269 	      vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
1270 	      size_t length = head ? head->length () : 0;
1271 	      for (size_t i = 0; i < length; i++)
1272 		{
1273 		  edge_var_map *vm = &(*head)[i];
1274 		  tree old_arg = redirect_edge_var_map_result (vm);
1275 		  tree new_arg = redirect_edge_var_map_def (vm);
1276 
1277 		  if (def == old_arg)
1278 		    {
1279 		      def = new_arg;
1280 		      locus = redirect_edge_var_map_location (vm);
1281 		      break;
1282 		    }
1283 		}
1284 	    }
1285 
1286 	  add_phi_arg (phi, def, s, locus);
1287 	}
1288 
1289       redirect_edge_var_map_clear (e);
1290     }
1291 
1292   /* Move debug statements.  Reset them if the destination does not
1293      have a single predecessor.  */
1294   move_debug_stmts_from_forwarder (bb, dest, dest_single_pred_p);
1295 
1296   /* Update the dominators.  */
1297   dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
1298   domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
1299   if (domdest == bb)
1300     {
1301       /* Shortcut to avoid calling (relatively expensive)
1302 	 nearest_common_dominator unless necessary.  */
1303       dom = dombb;
1304     }
1305   else
1306     dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);
1307 
1308   set_immediate_dominator (CDI_DOMINATORS, dest, dom);
1309 
1310   /* Adjust latch information of BB's parent loop as otherwise
1311      the cfg hook has a hard time not killing the loop.  */
1312   if (current_loops && bb->loop_father->latch == bb)
1313     bb->loop_father->latch = pred;
1314 
1315   /* Remove BB since all of BB's incoming edges have been redirected
1316      to DEST.  */
1317   delete_basic_block (bb);
1318 
1319   return true;
1320 }
1321 
1322 /* This pass merges PHI nodes if one feeds into another.  For example,
1323    suppose we have the following:
1324 
1325   goto <bb 9> (<L9>);
1326 
1327 <L8>:;
1328   tem_17 = foo ();
1329 
1330   # tem_6 = PHI <tem_17(8), tem_23(7)>;
1331 <L9>:;
1332 
1333   # tem_3 = PHI <tem_6(9), tem_2(5)>;
1334 <L10>:;
1335 
1336   Then we merge the first PHI node into the second one like so:
1337 
1338   goto <bb 9> (<L10>);
1339 
1340 <L8>:;
1341   tem_17 = foo ();
1342 
1343   # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
1344 <L10>:;
1345 */
1346 
1347 namespace {
1348 
1349 const pass_data pass_data_merge_phi =
1350 {
1351   GIMPLE_PASS, /* type */
1352   "mergephi", /* name */
1353   OPTGROUP_NONE, /* optinfo_flags */
1354   TV_TREE_MERGE_PHI, /* tv_id */
1355   ( PROP_cfg | PROP_ssa ), /* properties_required */
1356   0, /* properties_provided */
1357   0, /* properties_destroyed */
1358   0, /* todo_flags_start */
1359   0, /* todo_flags_finish */
1360 };
1361 
1362 class pass_merge_phi : public gimple_opt_pass
1363 {
1364 public:
1365   pass_merge_phi (gcc::context *ctxt)
1366     : gimple_opt_pass (pass_data_merge_phi, ctxt)
1367   {}
1368 
1369   /* opt_pass methods: */
1370   opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
1371   virtual unsigned int execute (function *);
1372 
1373 }; // class pass_merge_phi
1374 
1375 unsigned int
1376 pass_merge_phi::execute (function *fun)
1377 {
1378   basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
1379   basic_block *current = worklist;
1380   basic_block bb;
1381 
1382   calculate_dominance_info (CDI_DOMINATORS);
1383 
1384   /* Find all PHI nodes that we may be able to merge.  */
1385   FOR_EACH_BB_FN (bb, fun)
1386     {
1387       basic_block dest;
1388 
1389       /* Look for a forwarder block with PHI nodes.  */
1390       if (!tree_forwarder_block_p (bb, true))
1391 	continue;
1392 
1393       dest = single_succ (bb);
1394 
1395       /* We have to feed into another basic block with PHI
1396 	 nodes.  */
1397       if (gimple_seq_empty_p (phi_nodes (dest))
1398 	  /* We don't want to deal with a basic block with
1399 	     abnormal edges.  */
1400 	  || bb_has_abnormal_pred (bb))
1401 	continue;
1402 
1403       if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
1404 	{
1405 	  /* If BB does not dominate DEST, then the PHI nodes at
1406 	     DEST must be the only users of the results of the PHI
1407 	     nodes at BB.  */
1408 	  *current++ = bb;
1409 	}
1410       else
1411 	{
1412 	  gphi_iterator gsi;
1413 	  unsigned int dest_idx = single_succ_edge (bb)->dest_idx;
1414 
1415 	  /* BB dominates DEST.  There may be many users of the PHI
1416 	     nodes in BB.  However, there is still a trivial case we
1417 	     can handle.  If the result of every PHI in BB is used
1418 	     only by a PHI in DEST, then we can trivially merge the
1419 	     PHI nodes from BB into DEST.  */
1420 	  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
1421 	       gsi_next (&gsi))
1422 	    {
1423 	      gphi *phi = gsi.phi ();
1424 	      tree result = gimple_phi_result (phi);
1425 	      use_operand_p imm_use;
1426 	      gimple *use_stmt;
1427 
1428 	      /* If the PHI's result is never used, then we can just
1429 		 ignore it.  */
1430 	      if (has_zero_uses (result))
1431 		continue;
1432 
1433 	      /* Get the single use of the result of this PHI node.  */
1434   	      if (!single_imm_use (result, &imm_use, &use_stmt)
1435 		  || gimple_code (use_stmt) != GIMPLE_PHI
1436 		  || gimple_bb (use_stmt) != dest
1437 		  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
1438 		break;
1439 	    }
1440 
1441 	  /* If the loop above iterated through all the PHI nodes
1442 	     in BB, then we can merge the PHIs from BB into DEST.  */
1443 	  if (gsi_end_p (gsi))
1444 	    *current++ = bb;
1445 	}
1446     }
1447 
1448   /* Now let's drain WORKLIST.  */
1449   bool changed = false;
1450   while (current != worklist)
1451     {
1452       bb = *--current;
1453       changed |= remove_forwarder_block_with_phi (bb);
1454     }
1455   free (worklist);
1456 
1457   /* Removing forwarder blocks can cause formerly irreducible loops
1458      to become reducible if we merged two entry blocks.  */
1459   if (changed
1460       && current_loops)
1461     loops_state_set (LOOPS_NEED_FIXUP);
1462 
1463   return 0;
1464 }
1465 
1466 } // anon namespace
1467 
1468 gimple_opt_pass *
make_pass_merge_phi(gcc::context * ctxt)1469 make_pass_merge_phi (gcc::context *ctxt)
1470 {
1471   return new pass_merge_phi (ctxt);
1472 }
1473 
1474 /* Pass: cleanup the CFG just before expanding trees to RTL.
1475    This is just a round of label cleanups and case node grouping
1476    because after the tree optimizers have run such cleanups may
1477    be necessary.  */
1478 
1479 static unsigned int
1480 execute_cleanup_cfg_post_optimizing (void)
1481 {
1482   unsigned int todo = execute_fixup_cfg ();
1483   if (cleanup_tree_cfg ())
1484     {
1485       todo &= ~TODO_cleanup_cfg;
1486       todo |= TODO_update_ssa;
1487     }
1488   maybe_remove_unreachable_handlers ();
1489   cleanup_dead_labels ();
1490   if (group_case_labels ())
1491     todo |= TODO_cleanup_cfg;
1492   if ((flag_compare_debug_opt || flag_compare_debug)
1493       && flag_dump_final_insns)
1494     {
1495       FILE *final_output = fopen (flag_dump_final_insns, "a");
1496 
1497       if (!final_output)
1498 	{
1499 	  error ("could not open final insn dump file %qs: %m",
1500 		 flag_dump_final_insns);
1501 	  flag_dump_final_insns = NULL;
1502 	}
1503       else
1504 	{
1505 	  int save_unnumbered = flag_dump_unnumbered;
1506 	  int save_noaddr = flag_dump_noaddr;
1507 
1508 	  flag_dump_noaddr = flag_dump_unnumbered = 1;
1509 	  fprintf (final_output, "\n");
1510 	  dump_enumerated_decls (final_output,
1511 				 dump_flags | TDF_SLIM | TDF_NOUID);
1512 	  flag_dump_noaddr = save_noaddr;
1513 	  flag_dump_unnumbered = save_unnumbered;
1514 	  if (fclose (final_output))
1515 	    {
1516 	      error ("could not close final insn dump file %qs: %m",
1517 		     flag_dump_final_insns);
1518 	      flag_dump_final_insns = NULL;
1519 	    }
1520 	}
1521     }
1522   return todo;
1523 }
1524 
1525 namespace {
1526 
1527 const pass_data pass_data_cleanup_cfg_post_optimizing =
1528 {
1529   GIMPLE_PASS, /* type */
1530   "optimized", /* name */
1531   OPTGROUP_NONE, /* optinfo_flags */
1532   TV_TREE_CLEANUP_CFG, /* tv_id */
1533   PROP_cfg, /* properties_required */
1534   0, /* properties_provided */
1535   0, /* properties_destroyed */
1536   0, /* todo_flags_start */
1537   TODO_remove_unused_locals, /* todo_flags_finish */
1538 };
1539 
1540 class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
1541 {
1542 public:
1543   pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
1544     : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
1545   {}
1546 
1547   /* opt_pass methods: */
1548   virtual unsigned int execute (function *)
1549     {
1550       return execute_cleanup_cfg_post_optimizing ();
1551     }
1552 
1553 }; // class pass_cleanup_cfg_post_optimizing
1554 
1555 } // anon namespace
1556 
1557 gimple_opt_pass *
1558 make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
1559 {
1560   return new pass_cleanup_cfg_post_optimizing (ctxt);
1561 }
1562 
1563 
1564 /* Delete all unreachable basic blocks and update callgraph.
1565    Doing so is somewhat nontrivial because we need to update all clones and
1566    remove inline functions that become unreachable.  */
1567 
1568 bool
1569 delete_unreachable_blocks_update_callgraph (cgraph_node *dst_node,
1570 					    bool update_clones)
1571 {
1572   bool changed = false;
1573   basic_block b, next_bb;
1574 
1575   find_unreachable_blocks ();
1576 
1577   /* Delete all unreachable basic blocks.  */
1578 
1579   for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
1580        != EXIT_BLOCK_PTR_FOR_FN (cfun); b = next_bb)
1581     {
1582       next_bb = b->next_bb;
1583 
1584       if (!(b->flags & BB_REACHABLE))
1585 	{
1586           gimple_stmt_iterator bsi;
1587 
1588           for (bsi = gsi_start_bb (b); !gsi_end_p (bsi); gsi_next (&bsi))
1589 	    {
1590 	      struct cgraph_edge *e;
1591 	      struct cgraph_node *node;
1592 
1593 	      dst_node->remove_stmt_references (gsi_stmt (bsi));
1594 
1595 	      if (gimple_code (gsi_stmt (bsi)) == GIMPLE_CALL
1596 		  && (e = dst_node->get_edge (gsi_stmt (bsi))) != NULL)
1597 		{
1598 		  if (!e->inline_failed)
1599 		    e->callee->remove_symbol_and_inline_clones (dst_node);
1600 		  else
1601 		    e->remove ();
1602 		}
1603 	      if (update_clones && dst_node->clones)
1604 		for (node = dst_node->clones; node != dst_node;)
1605 		  {
1606 		    node->remove_stmt_references (gsi_stmt (bsi));
1607 		    if (gimple_code (gsi_stmt (bsi)) == GIMPLE_CALL
1608 			&& (e = node->get_edge (gsi_stmt (bsi))) != NULL)
1609 		      {
1610 			if (!e->inline_failed)
1611 			  e->callee->remove_symbol_and_inline_clones (dst_node);
1612 			else
1613 			  e->remove ();
1614 		      }
1615 
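		    /* Advance NODE in a preorder walk of the clone tree:
		       descend into clones first, then to siblings, climbing
		       back towards DST_NODE when a subtree is exhausted.  */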
1616 		    if (node->clones)
1617 		      node = node->clones;
1618 		    else if (node->next_sibling_clone)
1619 		      node = node->next_sibling_clone;
1620 		    else
1621 		      {
1622 			while (node != dst_node && !node->next_sibling_clone)
1623 			  node = node->clone_of;
1624 			if (node != dst_node)
1625 			  node = node->next_sibling_clone;
1626 		      }
1627 		  }
1628 	    }
1629 	  delete_basic_block (b);
1630 	  changed = true;
1631 	}
1632     }
1633 
1634   return changed;
1635 }
1636 
1637