1 /* CFG cleanup for trees.
2    Copyright (C) 2001-2019 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10 
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 GNU General Public License for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "cfghooks.h"
28 #include "tree-pass.h"
29 #include "ssa.h"
30 #include "diagnostic-core.h"
31 #include "fold-const.h"
32 #include "cfganal.h"
33 #include "cfgcleanup.h"
34 #include "tree-eh.h"
35 #include "gimplify.h"
36 #include "gimple-iterator.h"
37 #include "tree-cfg.h"
38 #include "tree-ssa-loop-manip.h"
39 #include "tree-dfa.h"
40 #include "tree-ssa.h"
41 #include "cfgloop.h"
42 #include "tree-scalar-evolution.h"
43 #include "gimple-match.h"
44 #include "gimple-fold.h"
45 #include "tree-ssa-loop-niter.h"
46 #include "cgraph.h"
47 #include "tree-into-ssa.h"
48 #include "tree-cfgcleanup.h"
49 
50 
51 /* The set of blocks in which at least one of the following changes happened:
52    -- the statement at the end of the block was changed
53    -- the block was newly created
54    -- the set of the predecessors of the block changed
55    -- the set of the successors of the block changed
56    ??? Maybe we could track these changes separately, since they determine
57        what cleanups it makes sense to try on the block.  */
58 bitmap cfgcleanup_altered_bbs;
59 
60 /* Remove any fallthru edge from EV.  Return true if an edge was removed.  */
61 
62 static bool
63 remove_fallthru_edge (vec<edge, va_gc> *ev)
64 {
65   edge_iterator ei;
66   edge e;
67 
68   FOR_EACH_EDGE (e, ei, ev)
69     if ((e->flags & EDGE_FALLTHRU) != 0)
70       {
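	/* If the fallthru edge is also complex (abnormal or EH) it cannot
	   simply be removed here, so keep it and merely drop its
	   fallthru-ness; otherwise remove the edge together with the
	   blocks its removal makes unreachable.  */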
71 	if (e->flags & EDGE_COMPLEX)
72 	  e->flags &= ~EDGE_FALLTHRU;
73 	else
74 	  remove_edge_and_dominated_blocks (e);
75 	return true;
76       }
77   return false;
78 }
79 
80 /* Convert a SWTCH with a single non-default case to gcond and replace it
81    at GSI.  */
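/* As an illustration (hypothetical GIMPLE), a switch with one non-default
   case such as

     switch (i_2) <default: <L2>, case 5: <L1>>

   is replaced by the equivalent condition

     if (i_2 == 5) goto <L1>; else goto <L2>;

   while a range test is emitted instead when the case label has a
   CASE_HIGH bound.  */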
82 
83 static bool
84 convert_single_case_switch (gswitch *swtch, gimple_stmt_iterator &gsi)
85 {
86   if (gimple_switch_num_labels (swtch) != 2)
87     return false;
88 
89   tree index = gimple_switch_index (swtch);
90   tree label = gimple_switch_label (swtch, 1);
91   tree low = CASE_LOW (label);
92   tree high = CASE_HIGH (label);
93 
94   basic_block default_bb = gimple_switch_default_bb (cfun, swtch);
95   basic_block case_bb = label_to_block (cfun, CASE_LABEL (label));
96 
97   basic_block bb = gimple_bb (swtch);
98   gcond *cond;
99 
100   /* Replace switch statement with condition statement.  */
101   if (high)
102     {
103       tree lhs, rhs;
104       generate_range_test (bb, index, low, high, &lhs, &rhs);
105       cond = gimple_build_cond (LE_EXPR, lhs, rhs, NULL_TREE, NULL_TREE);
106     }
107   else
108     cond = gimple_build_cond (EQ_EXPR, index,
109 			      fold_convert (TREE_TYPE (index), low),
110 			      NULL_TREE, NULL_TREE);
111 
112   gsi_replace (&gsi, cond, true);
113 
114   /* Update edges.  */
115   edge case_edge = find_edge (bb, case_bb);
116   edge default_edge = find_edge (bb, default_bb);
117 
118   case_edge->flags |= EDGE_TRUE_VALUE;
119   default_edge->flags |= EDGE_FALSE_VALUE;
120   return true;
121 }
122 
123 /* Disconnect an unreachable block in the control expression starting
124    at block BB.  */
125 
126 static bool
127 cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
128 {
129   edge taken_edge;
130   bool retval = false;
131   gimple *stmt = gsi_stmt (gsi);
132 
133   if (!single_succ_p (bb))
134     {
135       edge e;
136       edge_iterator ei;
137       bool warned;
138       tree val = NULL_TREE;
139 
140       /* Try to convert a switch with just a single non-default case to
141 	 a GIMPLE condition.  */
142       if (gimple_code (stmt) == GIMPLE_SWITCH
143 	  && convert_single_case_switch (as_a<gswitch *> (stmt), gsi))
144 	stmt = gsi_stmt (gsi);
145 
146       fold_defer_overflow_warnings ();
147       switch (gimple_code (stmt))
148 	{
149 	case GIMPLE_COND:
150 	  {
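	    /* Fold the condition without following SSA def chains; if it
	       simplifies all the way down to a constant, gimple_simplify
	       hands the INTEGER_CST back as the sole operand of RES_OP.  */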
151 	    gimple_match_op res_op;
152 	    if (gimple_simplify (stmt, &res_op, NULL, no_follow_ssa_edges,
153 				 no_follow_ssa_edges)
154 		&& res_op.code == INTEGER_CST)
155 	      val = res_op.ops[0];
156 	  }
157 	  break;
158 
159 	case GIMPLE_SWITCH:
160 	  val = gimple_switch_index (as_a <gswitch *> (stmt));
161 	  break;
162 
163 	default:
164 	  ;
165 	}
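      /* If VAL is now a known constant, find_taken_edge returns the single
	 outgoing edge that will always be taken; otherwise it returns NULL
	 and the control statement is left alone.  */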
166       taken_edge = find_taken_edge (bb, val);
167       if (!taken_edge)
168 	{
169 	  fold_undefer_and_ignore_overflow_warnings ();
170 	  return false;
171 	}
172 
173       /* Remove all the edges except the one that is always executed.  */
174       warned = false;
175       for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
176 	{
177 	  if (e != taken_edge)
178 	    {
179 	      if (!warned)
180 		{
181 		  fold_undefer_overflow_warnings
182 		    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
183 		  warned = true;
184 		}
185 
186 	      taken_edge->probability += e->probability;
187 	      remove_edge_and_dominated_blocks (e);
188 	      retval = true;
189 	    }
190 	  else
191 	    ei_next (&ei);
192 	}
193       if (!warned)
194 	fold_undefer_and_ignore_overflow_warnings ();
195     }
196   else
197     taken_edge = single_succ_edge (bb);
198 
199   bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
200   gsi_remove (&gsi, true);
201   taken_edge->flags = EDGE_FALLTHRU;
202 
203   return retval;
204 }
205 
206 /* Cleanup the GF_CALL_CTRL_ALTERING flag according to
207    the updated gimple_call_flags.  */
208 
209 static void
210 cleanup_call_ctrl_altering_flag (gimple *bb_end)
211 {
212   if (!is_gimple_call (bb_end)
213       || !gimple_call_ctrl_altering_p (bb_end))
214     return;
215 
216   int flags = gimple_call_flags (bb_end);
217   if (((flags & (ECF_CONST | ECF_PURE))
218        && !(flags & ECF_LOOPING_CONST_OR_PURE))
219       || (flags & ECF_LEAF))
220     gimple_call_set_ctrl_altering (bb_end, false);
221 }
222 
223 /* Try to remove superfluous control structures in basic block BB.  Returns
224    true if anything changes.  */
225 
226 static bool
227 cleanup_control_flow_bb (basic_block bb)
228 {
229   gimple_stmt_iterator gsi;
230   bool retval = false;
231   gimple *stmt;
232 
233   /* If the last statement of the block could throw and now cannot,
234      we need to prune cfg.  */
235   retval |= gimple_purge_dead_eh_edges (bb);
236 
237   gsi = gsi_last_nondebug_bb (bb);
238   if (gsi_end_p (gsi))
239     return retval;
240 
241   stmt = gsi_stmt (gsi);
242 
243   /* Try to cleanup ctrl altering flag for call which ends bb.  */
244   cleanup_call_ctrl_altering_flag (stmt);
245 
246   if (gimple_code (stmt) == GIMPLE_COND
247       || gimple_code (stmt) == GIMPLE_SWITCH)
248     {
249       gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
250       retval |= cleanup_control_expr_graph (bb, gsi);
251     }
252   else if (gimple_code (stmt) == GIMPLE_GOTO
253 	   && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
254 	   && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
255 	       == LABEL_DECL))
256     {
257       /* If we had a computed goto which has a compile-time determinable
258 	 destination, then we can eliminate the goto.  */
259       edge e;
260       tree label;
261       edge_iterator ei;
262       basic_block target_block;
263 
264       gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
265       /* First look at all the outgoing edges.  Delete any outgoing
266 	 edges which do not go to the right block.  For the one
267 	 edge which goes to the right block, fix up its flags.  */
268       label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
269       if (DECL_CONTEXT (label) != cfun->decl)
270 	return retval;
271       target_block = label_to_block (cfun, label);
272       for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
273 	{
274 	  if (e->dest != target_block)
275 	    remove_edge_and_dominated_blocks (e);
276 	  else
277 	    {
278 	      /* Turn off the EDGE_ABNORMAL flag.  */
279 	      e->flags &= ~EDGE_ABNORMAL;
280 
281 	      /* And set EDGE_FALLTHRU.  */
282 	      e->flags |= EDGE_FALLTHRU;
283 	      ei_next (&ei);
284 	    }
285 	}
286 
287       bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
288       bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);
289 
290       /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
291 	 relevant information we need.  */
292       gsi_remove (&gsi, true);
293       retval = true;
294     }
295 
296   /* Check for indirect calls that have been turned into
297      noreturn calls.  */
298   else if (is_gimple_call (stmt)
299 	   && gimple_call_noreturn_p (stmt))
300     {
301       /* If there are debug stmts after the noreturn call, remove them
302 	 now, they should be all unreachable anyway.  */
303       for (gsi_next (&gsi); !gsi_end_p (gsi); )
304 	gsi_remove (&gsi, true);
305       if (remove_fallthru_edge (bb->succs))
306 	retval = true;
307     }
308 
309   return retval;
310 }
311 
312 /* Return true if basic block BB does nothing except pass control
313    flow to another block and we can safely insert a label at
314    the start of the successor block.
315 
316    As a precondition, we require that BB be not equal to
317    the entry block.  */
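/* For instance, a block containing nothing but labels (and possibly debug
   stmts) whose single normal successor is another block qualifies; such
   blocks routinely remain behind after edge removal and block merging.  */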
318 
319 static bool
320 tree_forwarder_block_p (basic_block bb, bool phi_wanted)
321 {
322   gimple_stmt_iterator gsi;
323   location_t locus;
324 
325   /* BB must have a single outgoing edge.  */
326   if (single_succ_p (bb) != 1
327       /* If PHI_WANTED is false, BB must not have any PHI nodes.
328 	 Otherwise, BB must have PHI nodes.  */
329       || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
330       /* BB may not be a predecessor of the exit block.  */
331       || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
332       /* Nor should this be an infinite loop.  */
333       || single_succ (bb) == bb
334       /* BB may not have an abnormal outgoing edge.  */
335       || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
336     return false;
337 
338   gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));
339 
340   locus = single_succ_edge (bb)->goto_locus;
341 
342   /* There should not be an edge coming from entry, or an EH edge.  */
343   {
344     edge_iterator ei;
345     edge e;
346 
347     FOR_EACH_EDGE (e, ei, bb->preds)
348       if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
349 	return false;
350       /* If goto_locus of any of the edges differs, prevent removing
351 	 the forwarder block when not optimizing.  */
352       else if (!optimize
353 	       && (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
354 		   || LOCATION_LOCUS (locus) != UNKNOWN_LOCATION)
355 	       && e->goto_locus != locus)
356 	return false;
357   }
358 
359   /* Now walk through the statements backward.  We can ignore labels;
360      anything else means this is not a forwarder block.  */
361   for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
362     {
363       gimple *stmt = gsi_stmt (gsi);
364 
365       switch (gimple_code (stmt))
366 	{
367 	case GIMPLE_LABEL:
368 	  if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt))))
369 	    return false;
370 	  if (!optimize
371 	      && (gimple_has_location (stmt)
372 		  || LOCATION_LOCUS (locus) != UNKNOWN_LOCATION)
373 	      && gimple_location (stmt) != locus)
374 	    return false;
375 	  break;
376 
377 	  /* ??? For now, hope there's a corresponding debug
378 	     assignment at the destination.  */
379 	case GIMPLE_DEBUG:
380 	  break;
381 
382 	default:
383 	  return false;
384 	}
385     }
386 
387   if (current_loops)
388     {
389       basic_block dest;
390       /* Protect loop headers.  */
391       if (bb_loop_header_p (bb))
392 	return false;
393 
394       dest = EDGE_SUCC (bb, 0)->dest;
395       /* Protect loop preheaders and latches if requested.  */
396       if (dest->loop_father->header == dest)
397 	{
398 	  if (bb->loop_father == dest->loop_father)
399 	    {
400 	      if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
401 		return false;
402 	      /* If bb doesn't have a single predecessor we'd make this
403 		 loop have multiple latches.  Don't do that if that
404 		 would in turn require disambiguating them.  */
405 	      return (single_pred_p (bb)
406 		      || loops_state_satisfies_p
407 		      	   (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
408 	    }
409 	  else if (bb->loop_father == loop_outer (dest->loop_father))
410 	    return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
411 	  /* Always preserve other edges into loop headers that are
412 	     not simple latches or preheaders.  */
413 	  return false;
414 	}
415     }
416 
417   return true;
418 }
419 
420 /* If all the PHI nodes in DEST have alternatives for E1 and E2 and
421    those alternatives are equal in each of the PHI nodes, then return
422    true, else return false.  */
423 
424 static bool
425 phi_alternatives_equal (basic_block dest, edge e1, edge e2)
426 {
427   int n1 = e1->dest_idx;
428   int n2 = e2->dest_idx;
429   gphi_iterator gsi;
430 
431   for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
432     {
433       gphi *phi = gsi.phi ();
434       tree val1 = gimple_phi_arg_def (phi, n1);
435       tree val2 = gimple_phi_arg_def (phi, n2);
436 
437       gcc_assert (val1 != NULL_TREE);
438       gcc_assert (val2 != NULL_TREE);
439 
440       if (!operand_equal_for_phi_arg_p (val1, val2))
441 	return false;
442     }
443 
444   return true;
445 }
446 
447 /* Move debug stmts from the forwarder block SRC to DEST.  */
448 
449 static void
450 move_debug_stmts_from_forwarder (basic_block src, basic_block dest,
451 				 bool dest_single_pred_p)
452 {
453   if (!MAY_HAVE_DEBUG_STMTS)
454     return;
455 
456   gimple_stmt_iterator gsi_to = gsi_after_labels (dest);
457   for (gimple_stmt_iterator gsi = gsi_after_labels (src); !gsi_end_p (gsi);)
458     {
459       gimple *debug = gsi_stmt (gsi);
460       gcc_assert (is_gimple_debug (debug));
461       /* Move debug binds anyway, but not anything else like begin-stmt
462 	 markers unless they are always valid at the destination.  */
463       if (dest_single_pred_p
464 	  || gimple_debug_bind_p (debug))
465 	{
466 	  gsi_move_before (&gsi, &gsi_to);
467 	  /* Reset debug-binds that are not always valid at the destination.
468 	     Simply dropping them can cause earlier values to become live,
469 	     generating wrong debug information.
470 	     ???  There are several things we could improve here.  For
471 	     one we might be able to move stmts to the predecessor.
472 	     For another, if the debug stmt is immediately followed by a
473 	     (debug) definition in the destination (on a post-dominated path?)
474 	     we can elide it without any bad effects.  */
475 	  if (!dest_single_pred_p)
476 	    {
477 	      gimple_debug_bind_reset_value (debug);
478 	      update_stmt (debug);
479 	    }
480 	}
481       else
482 	gsi_next (&gsi);
483     }
484 }
485 
486 /* Removes forwarder block BB.  Returns false if this failed.  */
487 
488 static bool
489 remove_forwarder_block (basic_block bb)
490 {
491   edge succ = single_succ_edge (bb), e, s;
492   basic_block dest = succ->dest;
493   gimple *stmt;
494   edge_iterator ei;
495   gimple_stmt_iterator gsi, gsi_to;
496 
497   /* We check for infinite loops already in tree_forwarder_block_p.
498      However it may happen that the infinite loop is created
499      afterwards due to removal of forwarders.  */
500   if (dest == bb)
501     return false;
502 
503   /* If the destination block consists of a nonlocal label or is an
504      EH landing pad, do not merge it.  */
505   stmt = first_stmt (dest);
506   if (stmt)
507     if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
508       if (DECL_NONLOCAL (gimple_label_label (label_stmt))
509 	  || EH_LANDING_PAD_NR (gimple_label_label (label_stmt)) != 0)
510 	return false;
511 
512   /* If there is an abnormal edge to basic block BB, but not into
513      dest, problems might occur during removal of the phi node at
514      out-of-SSA time due to overlapping live ranges of registers.
515 
516      If there is an abnormal edge in DEST, the problems would occur
517      anyway since cleanup_dead_labels would then merge the labels for
518      two different eh regions, and the rest of the exception handling
519      code does not like it.
520 
521      So if there is an abnormal edge to BB, proceed only if there is
522      no abnormal edge to DEST and there are no phi nodes in DEST.  */
523   if (bb_has_abnormal_pred (bb)
524       && (bb_has_abnormal_pred (dest)
525 	  || !gimple_seq_empty_p (phi_nodes (dest))))
526     return false;
527 
528   /* If there are phi nodes in DEST, and some of the blocks that are
529      predecessors of BB are also predecessors of DEST, check that the
530      phi node arguments match.  */
531   if (!gimple_seq_empty_p (phi_nodes (dest)))
532     {
533       FOR_EACH_EDGE (e, ei, bb->preds)
534 	{
535 	  s = find_edge (e->src, dest);
536 	  if (!s)
537 	    continue;
538 
539 	  if (!phi_alternatives_equal (dest, succ, s))
540 	    return false;
541 	}
542     }
543 
544   basic_block pred = NULL;
545   if (single_pred_p (bb))
546     pred = single_pred (bb);
547   bool dest_single_pred_p = single_pred_p (dest);
548 
549   /* Redirect the edges.  */
550   for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
551     {
552       bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);
553 
554       if (e->flags & EDGE_ABNORMAL)
555 	{
556 	  /* If there is an abnormal edge, redirect it anyway, and
557 	     move the labels to the new block to make it legal.  */
558 	  s = redirect_edge_succ_nodup (e, dest);
559 	}
560       else
561 	s = redirect_edge_and_branch (e, dest);
562 
563       if (s == e)
564 	{
565 	  /* Create arguments for the phi nodes, since the edge was not
566 	     here before.  */
567 	  for (gphi_iterator psi = gsi_start_phis (dest);
568 	       !gsi_end_p (psi);
569 	       gsi_next (&psi))
570 	    {
571 	      gphi *phi = psi.phi ();
572 	      location_t l = gimple_phi_arg_location_from_edge (phi, succ);
573 	      tree def = gimple_phi_arg_def (phi, succ->dest_idx);
574 	      add_phi_arg (phi, unshare_expr (def), s, l);
575 	    }
576 	}
577     }
578 
579   /* Move nonlocal labels and computed goto targets as well as user
580      defined labels and labels with an EH landing pad number to the
581      new block, so that the redirection of the abnormal edges works,
582      jump targets end up in a sane place and debug information for
583      labels is retained.  */
584   gsi_to = gsi_start_bb (dest);
585   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
586     {
587       stmt = gsi_stmt (gsi);
588       if (is_gimple_debug (stmt))
589 	break;
590 
591       /* Forwarder blocks can only contain labels and debug stmts, and
592 	 labels must come first, so if we get to this point, we know
593 	 we're looking at a label.  */
594       tree decl = gimple_label_label (as_a <glabel *> (stmt));
595       if (EH_LANDING_PAD_NR (decl) != 0
596 	  || DECL_NONLOCAL (decl)
597 	  || FORCED_LABEL (decl)
598 	  || !DECL_ARTIFICIAL (decl))
599 	gsi_move_before (&gsi, &gsi_to);
600       else
601 	gsi_next (&gsi);
602     }
603 
604   /* Move debug statements.  Reset them if the destination does not
605      have a single predecessor.  */
606   move_debug_stmts_from_forwarder (bb, dest, dest_single_pred_p);
607 
608   bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);
609 
610   /* Update the dominators.  */
611   if (dom_info_available_p (CDI_DOMINATORS))
612     {
613       basic_block dom, dombb, domdest;
614 
615       dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
616       domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
617       if (domdest == bb)
618 	{
619 	  /* Shortcut to avoid calling (relatively expensive)
620 	     nearest_common_dominator unless necessary.  */
621 	  dom = dombb;
622 	}
623       else
624 	dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);
625 
626       set_immediate_dominator (CDI_DOMINATORS, dest, dom);
627     }
628 
629   /* Adjust latch information of BB's parent loop as otherwise
630      the cfg hook has a hard time not killing the loop.  */
631   if (current_loops && bb->loop_father->latch == bb)
632     bb->loop_father->latch = pred;
633 
634   /* And kill the forwarder block.  */
635   delete_basic_block (bb);
636 
637   return true;
638 }
639 
640 /* STMT is a call that has been discovered noreturn.  Split the
641    block to prepare fixing up the CFG and remove LHS.
642    Return true if cleanup-cfg needs to run.  */
643 
644 bool
645 fixup_noreturn_call (gimple *stmt)
646 {
647   basic_block bb = gimple_bb (stmt);
648   bool changed = false;
649 
650   if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
651     return false;
652 
653   /* First split basic block if stmt is not last.  */
654   if (stmt != gsi_stmt (gsi_last_bb (bb)))
655     {
656       if (stmt == gsi_stmt (gsi_last_nondebug_bb (bb)))
657 	{
658 	  /* Don't split if there are only debug stmts
659 	     after stmt, that can result in -fcompare-debug
660 	     failures.  Remove the debug stmts instead,
661 	     they should be all unreachable anyway.  */
662 	  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
663 	  for (gsi_next (&gsi); !gsi_end_p (gsi); )
664 	    gsi_remove (&gsi, true);
665 	}
666       else
667 	{
668 	  split_block (bb, stmt);
669 	  changed = true;
670 	}
671     }
672 
673   /* If there is an LHS, remove it, but only if its type has fixed size.
674      The LHS will need to be recreated during RTL expansion and creating
675      temporaries of variable-sized types is not supported.  Also don't
676      do this with TREE_ADDRESSABLE types, as assign_temp will abort.
677      Drop LHS regardless of TREE_ADDRESSABLE, if the function call
678      has been changed into a call that does not return a value, like
679      __builtin_unreachable or __cxa_pure_virtual.  */
680   tree lhs = gimple_call_lhs (stmt);
681   if (lhs
682       && (should_remove_lhs_p (lhs)
683 	  || VOID_TYPE_P (TREE_TYPE (gimple_call_fntype (stmt)))))
684     {
685       gimple_call_set_lhs (stmt, NULL_TREE);
686 
687       /* We need to fix up the SSA name to avoid checking errors.  */
688       if (TREE_CODE (lhs) == SSA_NAME)
689 	{
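	  /* The call no longer defines LHS, so re-anchor the SSA name to a
	     fresh temporary and turn it into a default definition with an
	     empty defining statement; this keeps any remaining (soon to be
	     unreachable) uses of LHS acceptable to the SSA verifier.  */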
690 	  tree new_var = create_tmp_reg (TREE_TYPE (lhs));
691 	  SET_SSA_NAME_VAR_OR_IDENTIFIER (lhs, new_var);
692 	  SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
693 	  set_ssa_default_def (cfun, new_var, lhs);
694 	}
695 
696       update_stmt (stmt);
697     }
698 
699   /* Mark the call as altering control flow.  */
700   if (!gimple_call_ctrl_altering_p (stmt))
701     {
702       gimple_call_set_ctrl_altering (stmt, true);
703       changed = true;
704     }
705 
706   return changed;
707 }
708 
709 /* Return true if we want to merge BB1 and BB2 into a single block.  */
710 
711 static bool
712 want_merge_blocks_p (basic_block bb1, basic_block bb2)
713 {
714   if (!can_merge_blocks_p (bb1, bb2))
715     return false;
716   gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb1);
717   if (gsi_end_p (gsi) || !stmt_can_terminate_bb_p (gsi_stmt (gsi)))
718     return true;
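  /* The last statement of BB1 may terminate the block (e.g. a call that
     can throw or not return), so BB2 may legitimately execute less often
     than BB1; only merge when the profile counts are close enough for the
     combined block to keep a sensible count.  */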
719   return bb1->count.ok_for_merging (bb2->count);
720 }
721 
722 
723 /* Tries to cleanup cfg in basic block BB by merging blocks.  Returns
724    true if anything changes.  */
725 
726 static bool
727 cleanup_tree_cfg_bb (basic_block bb)
728 {
729   if (tree_forwarder_block_p (bb, false)
730       && remove_forwarder_block (bb))
731     return true;
732 
733   /* If there is a merge opportunity with the predecessor
734      do nothing now but wait until we process the predecessor.
735      This happens when we visit BBs in a non-optimal order and
736      avoids quadratic behavior when adjusting the stmts' BB pointers.  */
737   if (single_pred_p (bb)
738       && want_merge_blocks_p (single_pred (bb), bb))
739     /* But make sure we _do_ visit it.  When we remove unreachable paths
740        ending in a backedge we fail to mark the destination's predecessors
741        as changed.  */
742     bitmap_set_bit (cfgcleanup_altered_bbs, single_pred (bb)->index);
743 
744   /* Merging the blocks may create new opportunities for folding
745      conditional branches (due to the elimination of single-valued PHI
746      nodes).  */
747   else if (single_succ_p (bb)
748 	   && want_merge_blocks_p (bb, single_succ (bb)))
749     {
750       merge_blocks (bb, single_succ (bb));
751       return true;
752     }
753 
754   return false;
755 }
756 
757 /* Return true if E is an EDGE_ABNORMAL edge for returns_twice calls,
758    i.e. one going from .ABNORMAL_DISPATCHER to a basic block which doesn't
759    start with a forced or nonlocal label.  Calls which return twice can return
760    the second time only if they are called normally the first time, so basic
761    blocks which can be only entered through these abnormal edges but not
762    normally are effectively unreachable as well.  Additionally ignore
763    __builtin_setjmp_receiver starting blocks, which have one FORCED_LABEL
764    and which are always only reachable through an EDGE_ABNORMAL edge.  They are
765    handled in cleanup_control_flow_pre.  */
766 
767 static bool
768 maybe_dead_abnormal_edge_p (edge e)
769 {
770   if ((e->flags & (EDGE_ABNORMAL | EDGE_EH)) != EDGE_ABNORMAL)
771     return false;
772 
773   gimple_stmt_iterator gsi = gsi_start_nondebug_after_labels_bb (e->src);
774   gimple *g = gsi_stmt (gsi);
775   if (!g || !gimple_call_internal_p (g, IFN_ABNORMAL_DISPATCHER))
776     return false;
777 
778   tree target = NULL_TREE;
779   for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
780     if (glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi)))
781       {
782 	tree this_target = gimple_label_label (label_stmt);
783 	if (DECL_NONLOCAL (this_target))
784 	  return false;
785 	if (FORCED_LABEL (this_target))
786 	  {
787 	    if (target)
788 	      return false;
789 	    target = this_target;
790 	  }
791       }
792     else
793       break;
794 
795   if (target)
796     {
797       /* If there was a single FORCED_LABEL, check for
798 	 __builtin_setjmp_receiver with address of that label.  */
799       if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
800 	gsi_next_nondebug (&gsi);
801       if (gsi_end_p (gsi))
802 	return false;
803       if (!gimple_call_builtin_p (gsi_stmt (gsi), BUILT_IN_SETJMP_RECEIVER))
804 	return false;
805 
806       tree arg = gimple_call_arg (gsi_stmt (gsi), 0);
807       if (TREE_CODE (arg) != ADDR_EXPR || TREE_OPERAND (arg, 0) != target)
808 	return false;
809     }
810   return true;
811 }
812 
813 /* If BB is a basic block ending with __builtin_setjmp_setup, return the
814    edge from the .ABNORMAL_DISPATCHER basic block to the corresponding
815    __builtin_setjmp_receiver basic block, otherwise return NULL.  */
816 static edge
817 builtin_setjmp_setup_bb (basic_block bb)
818 {
819   if (EDGE_COUNT (bb->succs) != 2
820       || ((EDGE_SUCC (bb, 0)->flags
821 	   & (EDGE_ABNORMAL | EDGE_EH)) != EDGE_ABNORMAL
822 	  && (EDGE_SUCC (bb, 1)->flags
823 	      & (EDGE_ABNORMAL | EDGE_EH)) != EDGE_ABNORMAL))
824     return NULL;
825 
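  /* BB itself must end in a __builtin_setjmp_setup call whose second
     argument is the address of the receiver label.  */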
826   gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
827   if (gsi_end_p (gsi)
828       || !gimple_call_builtin_p (gsi_stmt (gsi), BUILT_IN_SETJMP_SETUP))
829     return NULL;
830 
831   tree arg = gimple_call_arg (gsi_stmt (gsi), 1);
832   if (TREE_CODE (arg) != ADDR_EXPR
833       || TREE_CODE (TREE_OPERAND (arg, 0)) != LABEL_DECL)
834     return NULL;
835 
836   basic_block recv_bb = label_to_block (cfun, TREE_OPERAND (arg, 0));
837   if (EDGE_COUNT (recv_bb->preds) != 1
838       || (EDGE_PRED (recv_bb, 0)->flags
839 	  & (EDGE_ABNORMAL | EDGE_EH)) != EDGE_ABNORMAL
840       || (EDGE_SUCC (bb, 0)->dest != EDGE_PRED (recv_bb, 0)->src
841 	  && EDGE_SUCC (bb, 1)->dest != EDGE_PRED (recv_bb, 0)->src))
842     return NULL;
843 
844   /* EDGE_PRED (recv_bb, 0)->src should be the .ABNORMAL_DISPATCHER bb.  */
845   return EDGE_PRED (recv_bb, 0);
846 }
847 
848 /* Do cleanup_control_flow_bb in PRE order.  */
849 
850 static bool
851 cleanup_control_flow_pre ()
852 {
853   bool retval = false;
854 
855   /* We want remove_edge_and_dominated_blocks to only remove edges, not
856      dominated blocks, which it only does when dominator info isn't
857      available, so pretend that it isn't.  */
858   dom_state saved_state = dom_info_state (CDI_DOMINATORS);
859   set_dom_info_availability (CDI_DOMINATORS, DOM_NONE);
860 
861   auto_vec<edge_iterator, 20> stack (n_basic_blocks_for_fn (cfun) + 2);
862   auto_sbitmap visited (last_basic_block_for_fn (cfun));
863   bitmap_clear (visited);
864 
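  /* SETJMP_VEC queues edges from .ABNORMAL_DISPATCHER blocks to their
     __builtin_setjmp_receiver blocks so that those destinations are still
     walked even though maybe_dead_abnormal_edge_p would skip the edges;
     visited dispatcher blocks are remembered so that any dispatcher left
     without a visited successor can be unmarked again below.  */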
865   vec<edge, va_gc> *setjmp_vec = NULL;
866   auto_vec<basic_block, 4> abnormal_dispatchers;
867 
868   stack.quick_push (ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs));
869 
870   while (! stack.is_empty ())
871     {
872       /* Look at the edge on the top of the stack.  */
873       edge_iterator ei = stack.last ();
874       basic_block dest = ei_edge (ei)->dest;
875 
876       if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
877 	  && !bitmap_bit_p (visited, dest->index)
878 	  && (ei_container (ei) == setjmp_vec
879 	      || !maybe_dead_abnormal_edge_p (ei_edge (ei))))
880 	{
881 	  bitmap_set_bit (visited, dest->index);
882 	  /* We only possibly remove edges from DEST here, leaving
883 	     possibly unreachable code in the IL.  */
884 	  retval |= cleanup_control_flow_bb (dest);
885 
886 	  /* Check for __builtin_setjmp_setup.  Edges from .ABNORMAL_DISPATCHER
887 	     to __builtin_setjmp_receiver will be normally ignored by
888 	     maybe_dead_abnormal_edge_p.  If DEST is a visited
889 	     __builtin_setjmp_setup, queue edge from .ABNORMAL_DISPATCHER
890 	     to __builtin_setjmp_receiver, so that it will be visited too.  */
891 	  if (edge e = builtin_setjmp_setup_bb (dest))
892 	    {
893 	      vec_safe_push (setjmp_vec, e);
894 	      if (vec_safe_length (setjmp_vec) == 1)
895 		stack.quick_push (ei_start (setjmp_vec));
896 	    }
897 
898 	  if ((ei_edge (ei)->flags
899 	       & (EDGE_ABNORMAL | EDGE_EH)) == EDGE_ABNORMAL)
900 	    {
901 	      gimple_stmt_iterator gsi
902 		= gsi_start_nondebug_after_labels_bb (dest);
903 	      gimple *g = gsi_stmt (gsi);
904 	      if (g && gimple_call_internal_p (g, IFN_ABNORMAL_DISPATCHER))
905 		abnormal_dispatchers.safe_push (dest);
906 	    }
907 
908 	  if (EDGE_COUNT (dest->succs) > 0)
909 	    stack.quick_push (ei_start (dest->succs));
910 	}
911       else
912 	{
913 	  if (!ei_one_before_end_p (ei))
914 	    ei_next (&stack.last ());
915 	  else
916 	    {
917 	      if (ei_container (ei) == setjmp_vec)
918 		vec_safe_truncate (setjmp_vec, 0);
919 	      stack.pop ();
920 	    }
921 	}
922     }
923 
924   vec_free (setjmp_vec);
925 
926   /* If we've marked .ABNORMAL_DISPATCHER basic block(s) as visited
927      above, but haven't marked any of their successors as visited,
928      unmark them now, so that they can be removed as useless.  */
929   basic_block dispatcher_bb;
930   unsigned int k;
931   FOR_EACH_VEC_ELT (abnormal_dispatchers, k, dispatcher_bb)
932     {
933       edge e;
934       edge_iterator ei;
935       FOR_EACH_EDGE (e, ei, dispatcher_bb->succs)
936 	if (bitmap_bit_p (visited, e->dest->index))
937 	  break;
938       if (e == NULL)
939 	bitmap_clear_bit (visited, dispatcher_bb->index);
940     }
941 
942   set_dom_info_availability (CDI_DOMINATORS, saved_state);
943 
944   /* We are deleting BBs in non-reverse dominator order, make sure
945      insert_debug_temps_for_defs is prepared for that.  */
946   if (retval)
947     free_dominance_info (CDI_DOMINATORS);
948 
949   /* Remove all now (and previously) unreachable blocks.  */
950   for (int i = NUM_FIXED_BLOCKS; i < last_basic_block_for_fn (cfun); ++i)
951     {
952       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
953       if (bb && !bitmap_bit_p (visited, bb->index))
954 	{
955 	  if (!retval)
956 	    free_dominance_info (CDI_DOMINATORS);
957 	  delete_basic_block (bb);
958 	  retval = true;
959 	}
960     }
961 
962   return retval;
963 }
964 
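/* Edge predicate passed to make_forwarder_block below when giving a loop
   header with multiple entries a forwarder: returns false exactly for
   edges that look like latch edges, i.e. whose source is dominated by
   their destination or that are marked as DFS back edges.  */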
965 static bool
966 mfb_keep_latches (edge e)
967 {
968   return !((dom_info_available_p (CDI_DOMINATORS)
969 	    && dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
970 	   || (e->flags & EDGE_DFS_BACK));
971 }
972 
973 /* Remove unreachable blocks and other miscellaneous clean up work.
974    Return true if the flowgraph was modified, false otherwise.  */
975 
976 static bool
977 cleanup_tree_cfg_noloop (unsigned ssa_update_flags)
978 {
979   timevar_push (TV_TREE_CLEANUP_CFG);
980 
981   /* Ensure that we have single entries into loop headers.  Otherwise
982      if one of the entries is becoming a latch due to CFG cleanup
983      (from formerly being part of an irreducible region) then we mess
984      up loop fixup and associate the old loop with a different region
985      which makes niter upper bounds invalid.  See for example PR80549.
986      This needs to be done before we remove trivially dead edges as
987      we need to capture the dominance state before the pending transform.  */
988   if (current_loops)
989     {
990       /* This needs backedges or dominators.  */
991       if (!dom_info_available_p (CDI_DOMINATORS))
992 	mark_dfs_back_edges ();
993 
994       loop_p loop;
995       unsigned i;
996       FOR_EACH_VEC_ELT (*get_loops (cfun), i, loop)
997 	if (loop && loop->header)
998 	  {
999 	    basic_block bb = loop->header;
1000 	    edge_iterator ei;
1001 	    edge e;
1002 	    bool found_latch = false;
1003 	    bool any_abnormal = false;
1004 	    unsigned n = 0;
1005 	    /* We are only interested in preserving existing loops, but
1006 	       we need to check whether they are still real and of course
1007 	       if we need to add a preheader at all.  */
1008 	    FOR_EACH_EDGE (e, ei, bb->preds)
1009 	      {
1010 		if (e->flags & EDGE_ABNORMAL)
1011 		  {
1012 		    any_abnormal = true;
1013 		    break;
1014 		  }
1015 		if ((dom_info_available_p (CDI_DOMINATORS)
1016 		     && dominated_by_p (CDI_DOMINATORS, e->src, bb))
1017 		    || (e->flags & EDGE_DFS_BACK))
1018 		  {
1019 		    found_latch = true;
1020 		    continue;
1021 		  }
1022 		n++;
1023 	      }
1024 	    /* If we have more than one entry to the loop header,
1025 	       create a forwarder.  */
1026 	    if (found_latch && ! any_abnormal && n > 1)
1027 	      {
1028 		edge fallthru = make_forwarder_block (bb, mfb_keep_latches,
1029 						      NULL);
1030 		loop->header = fallthru->dest;
1031 		if (! loops_state_satisfies_p (LOOPS_NEED_FIXUP))
1032 		  {
1033 		    /* The loop updating from the CFG hook is incomplete
1034 		       when we have multiple latches, fixup manually.  */
1035 		    remove_bb_from_loops (fallthru->src);
1036 		    loop_p cloop = loop;
1037 		    FOR_EACH_EDGE (e, ei, fallthru->src->preds)
1038 		      cloop = find_common_loop (cloop, e->src->loop_father);
1039 		    add_bb_to_loop (fallthru->src, cloop);
1040 		  }
1041 	      }
1042 	  }
1043     }
1044 
1045   /* Prepare the worklists of altered blocks.  */
1046   cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);
1047 
1048   /* Start by iterating over all basic blocks in PRE order looking for
1049      edge removal opportunities.  Do this first because incoming SSA form
1050      may be invalid and we want to avoid performing SSA related tasks such
1051      as propagating out a PHI node during BB merging in that state.  This
1052      also gets rid of unreachable blocks.  */
1053   bool changed = cleanup_control_flow_pre ();
1054 
1055   /* After doing the above SSA form should be valid (or an update SSA
1056      should be required).  */
1057   if (ssa_update_flags)
1058     update_ssa (ssa_update_flags);
1059 
1060   /* Compute dominator info which we need for the iterative process below.  */
1061   if (!dom_info_available_p (CDI_DOMINATORS))
1062     calculate_dominance_info (CDI_DOMINATORS);
1063   else
1064     checking_verify_dominators (CDI_DOMINATORS);
1065 
1066   /* During forwarder block cleanup, we may redirect edges out of
1067      SWITCH_EXPRs, which can get expensive.  So we want to enable
1068      recording of edge to CASE_LABEL_EXPR.  */
1069   start_recording_case_labels ();
1070 
1071   /* Continue by iterating over all basic blocks looking for BB merging
1072      opportunities.  We cannot use FOR_EACH_BB_FN for the BB iteration
1073      since the basic blocks may get removed.  */
1074   unsigned n = last_basic_block_for_fn (cfun);
1075   for (unsigned i = NUM_FIXED_BLOCKS; i < n; i++)
1076     {
1077       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
1078       if (bb)
1079 	changed |= cleanup_tree_cfg_bb (bb);
1080     }
1081 
1082   /* Now process the altered blocks, as long as any are available.  */
1083   while (!bitmap_empty_p (cfgcleanup_altered_bbs))
1084     {
1085       unsigned i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
1086       bitmap_clear_bit (cfgcleanup_altered_bbs, i);
1087       if (i < NUM_FIXED_BLOCKS)
1088 	continue;
1089 
1090       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
1091       if (!bb)
1092 	continue;
1093 
1094       /* BB merging done by cleanup_tree_cfg_bb can end up propagating
1095 	 out single-argument PHIs which in turn can expose
1096 	 cleanup_control_flow_bb opportunities so we have to repeat
1097 	 that here.  */
1098       changed |= cleanup_control_flow_bb (bb);
1099       changed |= cleanup_tree_cfg_bb (bb);
1100     }
1101 
1102   end_recording_case_labels ();
1103   BITMAP_FREE (cfgcleanup_altered_bbs);
1104 
1105   gcc_assert (dom_info_available_p (CDI_DOMINATORS));
1106 
1107   /* Do not renumber blocks if the SCEV cache is active, it is indexed by
1108      basic-block numbers.  */
1109   if (! scev_initialized_p ())
1110     compact_blocks ();
1111 
1112   checking_verify_flow_info ();
1113 
1114   timevar_pop (TV_TREE_CLEANUP_CFG);
1115 
1116   if (changed && current_loops)
1117     {
1118       /* Removing edges and/or blocks may make recorded bounds refer
1119          to stale GIMPLE stmts now, so clear them.  */
1120       free_numbers_of_iterations_estimates (cfun);
1121       loops_state_set (LOOPS_NEED_FIXUP);
1122     }
1123 
1124   return changed;
1125 }
1126 
1127 /* Repairs loop structures.  */
1128 
1129 static void
1130 repair_loop_structures (void)
1131 {
1132   bitmap changed_bbs;
1133   unsigned n_new_loops;
1134 
1135   calculate_dominance_info (CDI_DOMINATORS);
1136 
1137   timevar_push (TV_REPAIR_LOOPS);
1138   changed_bbs = BITMAP_ALLOC (NULL);
1139   n_new_loops = fix_loop_structure (changed_bbs);
1140 
1141   /* This usually does nothing.  But sometimes parts of cfg that originally
1142      were inside a loop get out of it due to edge removal (since they
1143      become unreachable by back edges from latch).  Also a former
1144      irreducible loop can become reducible - in this case force a full
1145      rewrite into loop-closed SSA form.  */
1146   if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
1147     rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
1148 				  TODO_update_ssa);
1149 
1150   BITMAP_FREE (changed_bbs);
1151 
1152   checking_verify_loop_structure ();
1153   scev_reset ();
1154 
1155   timevar_pop (TV_REPAIR_LOOPS);
1156 }
1157 
1158 /* Cleanup cfg and repair loop structures.  */
1159 
1160 bool
1161 cleanup_tree_cfg (unsigned ssa_update_flags)
1162 {
1163   bool changed = cleanup_tree_cfg_noloop (ssa_update_flags);
1164 
1165   if (current_loops != NULL
1166       && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
1167     repair_loop_structures ();
1168 
1169   return changed;
1170 }
1171 
1172 /* Tries to merge the PHI nodes at BB into those at BB's sole successor.
1173    Returns true if successful.  */
1174 
1175 static bool
1176 remove_forwarder_block_with_phi (basic_block bb)
1177 {
1178   edge succ = single_succ_edge (bb);
1179   basic_block dest = succ->dest;
1180   gimple *label;
1181   basic_block dombb, domdest, dom;
1182 
1183   /* We check for infinite loops already in tree_forwarder_block_p.
1184      However it may happen that the infinite loop is created
1185      afterwards due to removal of forwarders.  */
1186   if (dest == bb)
1187     return false;
1188 
1189   /* Removal of forwarders may expose new natural loops and thus
1190      a block may turn into a loop header.  */
1191   if (current_loops && bb_loop_header_p (bb))
1192     return false;
1193 
1194   /* If the destination block consists of a nonlocal label, do not
1195      merge it.  */
1196   label = first_stmt (dest);
1197   if (label)
1198     if (glabel *label_stmt = dyn_cast <glabel *> (label))
1199       if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
1200 	return false;
1201 
1202   /* Record BB's single pred in case we need to update the father
1203      loop's latch information later.  */
1204   basic_block pred = NULL;
1205   if (single_pred_p (bb))
1206     pred = single_pred (bb);
1207   bool dest_single_pred_p = single_pred_p (dest);
1208 
1209   /* Redirect each incoming edge to BB to DEST.  */
1210   while (EDGE_COUNT (bb->preds) > 0)
1211     {
1212       edge e = EDGE_PRED (bb, 0), s;
1213       gphi_iterator gsi;
1214 
1215       s = find_edge (e->src, dest);
1216       if (s)
1217 	{
1218 	  /* We already have an edge S from E->src to DEST.  If S and
1219 	     E->dest's sole successor edge have the same PHI arguments
1220 	     at DEST, redirect S to DEST.  */
1221 	  if (phi_alternatives_equal (dest, s, succ))
1222 	    {
1223 	      e = redirect_edge_and_branch (e, dest);
1224 	      redirect_edge_var_map_clear (e);
1225 	      continue;
1226 	    }
1227 
1228 	  /* PHI arguments are different.  Create a forwarder block by
1229 	     splitting E so that we can merge PHI arguments on E to
1230 	     DEST.  */
1231 	  e = single_succ_edge (split_edge (e));
1232 	}
1233       else
1234 	{
1235 	  /* If we merge the forwarder into a loop header verify if we
1236 	     are creating another loop latch edge.  If so, reset
1237 	     number of iteration information of the loop.  */
1238 	  if (dest->loop_father->header == dest
1239 	      && dominated_by_p (CDI_DOMINATORS, e->src, dest))
1240 	    {
1241 	      dest->loop_father->any_upper_bound = false;
1242 	      dest->loop_father->any_likely_upper_bound = false;
1243 	      free_numbers_of_iterations_estimates (dest->loop_father);
1244 	    }
1245 	}
1246 
1247       s = redirect_edge_and_branch (e, dest);
1248 
1249       /* redirect_edge_and_branch must not create a new edge.  */
1250       gcc_assert (s == e);
1251 
1252       /* Add to the PHI nodes at DEST each PHI argument removed at the
1253 	 destination of E.  */
1254       for (gsi = gsi_start_phis (dest);
1255 	   !gsi_end_p (gsi);
1256 	   gsi_next (&gsi))
1257 	{
1258 	  gphi *phi = gsi.phi ();
1259 	  tree def = gimple_phi_arg_def (phi, succ->dest_idx);
1260 	  location_t locus = gimple_phi_arg_location_from_edge (phi, succ);
1261 
1262 	  if (TREE_CODE (def) == SSA_NAME)
1263 	    {
1264 	      /* If DEF is one of the results of PHI nodes removed during
1265 		 redirection, replace it with the PHI argument that used
1266 		 to be on E.  */
1267 	      vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
1268 	      size_t length = head ? head->length () : 0;
1269 	      for (size_t i = 0; i < length; i++)
1270 		{
1271 		  edge_var_map *vm = &(*head)[i];
1272 		  tree old_arg = redirect_edge_var_map_result (vm);
1273 		  tree new_arg = redirect_edge_var_map_def (vm);
1274 
1275 		  if (def == old_arg)
1276 		    {
1277 		      def = new_arg;
1278 		      locus = redirect_edge_var_map_location (vm);
1279 		      break;
1280 		    }
1281 		}
1282 	    }
1283 
1284 	  add_phi_arg (phi, def, s, locus);
1285 	}
1286 
1287       redirect_edge_var_map_clear (e);
1288     }
1289 
1290   /* Move debug statements.  Reset them if the destination does not
1291      have a single predecessor.  */
1292   move_debug_stmts_from_forwarder (bb, dest, dest_single_pred_p);
1293 
1294   /* Update the dominators.  */
1295   dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
1296   domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
1297   if (domdest == bb)
1298     {
1299       /* Shortcut to avoid calling (relatively expensive)
1300 	 nearest_common_dominator unless necessary.  */
1301       dom = dombb;
1302     }
1303   else
1304     dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);
1305 
1306   set_immediate_dominator (CDI_DOMINATORS, dest, dom);
1307 
1308   /* Adjust latch information of BB's parent loop as otherwise
1309      the cfg hook has a hard time not killing the loop.  */
1310   if (current_loops && bb->loop_father->latch == bb)
1311     bb->loop_father->latch = pred;
1312 
1313   /* Remove BB since all of BB's incoming edges have been redirected
1314      to DEST.  */
1315   delete_basic_block (bb);
1316 
1317   return true;
1318 }
1319 
1320 /* This pass merges PHI nodes if one feeds into another.  For example,
1321    suppose we have the following:
1322 
1323   goto <bb 9> (<L9>);
1324 
1325 <L8>:;
1326   tem_17 = foo ();
1327 
1328   # tem_6 = PHI <tem_17(8), tem_23(7)>;
1329 <L9>:;
1330 
1331   # tem_3 = PHI <tem_6(9), tem_2(5)>;
1332 <L10>:;
1333 
1334   Then we merge the first PHI node into the second one like so:
1335 
1336   goto <bb 9> (<L10>);
1337 
1338 <L8>:;
1339   tem_17 = foo ();
1340 
1341   # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
1342 <L10>:;
1343 */
1344 
1345 namespace {
1346 
1347 const pass_data pass_data_merge_phi =
1348 {
1349   GIMPLE_PASS, /* type */
1350   "mergephi", /* name */
1351   OPTGROUP_NONE, /* optinfo_flags */
1352   TV_TREE_MERGE_PHI, /* tv_id */
1353   ( PROP_cfg | PROP_ssa ), /* properties_required */
1354   0, /* properties_provided */
1355   0, /* properties_destroyed */
1356   0, /* todo_flags_start */
1357   0, /* todo_flags_finish */
1358 };
1359 
1360 class pass_merge_phi : public gimple_opt_pass
1361 {
1362 public:
1363   pass_merge_phi (gcc::context *ctxt)
1364     : gimple_opt_pass (pass_data_merge_phi, ctxt)
1365   {}
1366 
1367   /* opt_pass methods: */
1368   opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
1369   virtual unsigned int execute (function *);
1370 
1371 }; // class pass_merge_phi
1372 
1373 unsigned int
1374 pass_merge_phi::execute (function *fun)
1375 {
1376   basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
1377   basic_block *current = worklist;
1378   basic_block bb;
1379 
1380   calculate_dominance_info (CDI_DOMINATORS);
1381 
1382   /* Find all PHI nodes that we may be able to merge.  */
1383   FOR_EACH_BB_FN (bb, fun)
1384     {
1385       basic_block dest;
1386 
1387       /* Look for a forwarder block with PHI nodes.  */
1388       if (!tree_forwarder_block_p (bb, true))
1389 	continue;
1390 
1391       dest = single_succ (bb);
1392 
1393       /* We have to feed into another basic block with PHI
1394 	 nodes.  */
1395       if (gimple_seq_empty_p (phi_nodes (dest))
1396 	  /* We don't want to deal with a basic block with
1397 	     abnormal edges.  */
1398 	  || bb_has_abnormal_pred (bb))
1399 	continue;
1400 
1401       if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
1402 	{
1403 	  /* If BB does not dominate DEST, then the PHI nodes at
1404 	     DEST must be the only users of the results of the PHI
1405 	     nodes at BB.  */
1406 	  *current++ = bb;
1407 	}
1408       else
1409 	{
1410 	  gphi_iterator gsi;
1411 	  unsigned int dest_idx = single_succ_edge (bb)->dest_idx;
1412 
1413 	  /* BB dominates DEST.  There may be many users of the PHI
1414 	     nodes in BB.  However, there is still a trivial case we
1415 	     can handle.  If the result of every PHI in BB is used
1416 	     only by a PHI in DEST, then we can trivially merge the
1417 	     PHI nodes from BB into DEST.  */
1418 	  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
1419 	       gsi_next (&gsi))
1420 	    {
1421 	      gphi *phi = gsi.phi ();
1422 	      tree result = gimple_phi_result (phi);
1423 	      use_operand_p imm_use;
1424 	      gimple *use_stmt;
1425 
1426 	      /* If the PHI's result is never used, then we can just
1427 		 ignore it.  */
1428 	      if (has_zero_uses (result))
1429 		continue;
1430 
1431 	      /* Get the single use of the result of this PHI node.  */
1432   	      if (!single_imm_use (result, &imm_use, &use_stmt)
1433 		  || gimple_code (use_stmt) != GIMPLE_PHI
1434 		  || gimple_bb (use_stmt) != dest
1435 		  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
1436 		break;
1437 	    }
1438 
1439 	  /* If the loop above iterated through all the PHI nodes
1440 	     in BB, then we can merge the PHIs from BB into DEST.  */
1441 	  if (gsi_end_p (gsi))
1442 	    *current++ = bb;
1443 	}
1444     }
1445 
1446   /* Now let's drain WORKLIST.  */
1447   bool changed = false;
1448   while (current != worklist)
1449     {
1450       bb = *--current;
1451       changed |= remove_forwarder_block_with_phi (bb);
1452     }
1453   free (worklist);
1454 
1455   /* Removing forwarder blocks can cause formerly irreducible loops
1456      to become reducible if we merged two entry blocks.  */
1457   if (changed
1458       && current_loops)
1459     loops_state_set (LOOPS_NEED_FIXUP);
1460 
1461   return 0;
1462 }
1463 
1464 } // anon namespace
1465 
1466 gimple_opt_pass *
1467 make_pass_merge_phi (gcc::context *ctxt)
1468 {
1469   return new pass_merge_phi (ctxt);
1470 }
1471 
1472 /* Pass: cleanup the CFG just before expanding trees to RTL.
1473    This is just a round of label cleanups and case node grouping
1474    because after the tree optimizers have run such cleanups may
1475    be necessary.  */
1476 
1477 static unsigned int
1478 execute_cleanup_cfg_post_optimizing (void)
1479 {
1480   unsigned int todo = execute_fixup_cfg ();
1481   if (cleanup_tree_cfg ())
1482     {
1483       todo &= ~TODO_cleanup_cfg;
1484       todo |= TODO_update_ssa;
1485     }
1486   maybe_remove_unreachable_handlers ();
1487   cleanup_dead_labels ();
1488   if (group_case_labels ())
1489     todo |= TODO_cleanup_cfg;
1490   if ((flag_compare_debug_opt || flag_compare_debug)
1491       && flag_dump_final_insns)
1492     {
1493       FILE *final_output = fopen (flag_dump_final_insns, "a");
1494 
1495       if (!final_output)
1496 	{
1497 	  error ("could not open final insn dump file %qs: %m",
1498 		 flag_dump_final_insns);
1499 	  flag_dump_final_insns = NULL;
1500 	}
1501       else
1502 	{
1503 	  int save_unnumbered = flag_dump_unnumbered;
1504 	  int save_noaddr = flag_dump_noaddr;
1505 
1506 	  flag_dump_noaddr = flag_dump_unnumbered = 1;
1507 	  fprintf (final_output, "\n");
1508 	  dump_enumerated_decls (final_output,
1509 				 dump_flags | TDF_SLIM | TDF_NOUID);
1510 	  flag_dump_noaddr = save_noaddr;
1511 	  flag_dump_unnumbered = save_unnumbered;
1512 	  if (fclose (final_output))
1513 	    {
1514 	      error ("could not close final insn dump file %qs: %m",
1515 		     flag_dump_final_insns);
1516 	      flag_dump_final_insns = NULL;
1517 	    }
1518 	}
1519     }
1520   return todo;
1521 }
1522 
1523 namespace {
1524 
1525 const pass_data pass_data_cleanup_cfg_post_optimizing =
1526 {
1527   GIMPLE_PASS, /* type */
1528   "optimized", /* name */
1529   OPTGROUP_NONE, /* optinfo_flags */
1530   TV_TREE_CLEANUP_CFG, /* tv_id */
1531   PROP_cfg, /* properties_required */
1532   0, /* properties_provided */
1533   0, /* properties_destroyed */
1534   0, /* todo_flags_start */
1535   TODO_remove_unused_locals, /* todo_flags_finish */
1536 };
1537 
1538 class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
1539 {
1540 public:
1541   pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
1542     : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
1543   {}
1544 
1545   /* opt_pass methods: */
1546   virtual unsigned int execute (function *)
1547     {
1548       return execute_cleanup_cfg_post_optimizing ();
1549     }
1550 
1551 }; // class pass_cleanup_cfg_post_optimizing
1552 
1553 } // anon namespace
1554 
1555 gimple_opt_pass *
1556 make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
1557 {
1558   return new pass_cleanup_cfg_post_optimizing (ctxt);
1559 }
1560 
1561 
1562 /* Delete all unreachable basic blocks and update callgraph.
1563    Doing so is somewhat nontrivial because we need to update all clones and
1564    remove inline functions that become unreachable.  */
1565 
1566 bool
1567 delete_unreachable_blocks_update_callgraph (cgraph_node *dst_node,
1568 					    bool update_clones)
1569 {
1570   bool changed = false;
1571   basic_block b, next_bb;
1572 
1573   find_unreachable_blocks ();
1574 
1575   /* Delete all unreachable basic blocks.  */
1576 
1577   for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
1578        != EXIT_BLOCK_PTR_FOR_FN (cfun); b = next_bb)
1579     {
1580       next_bb = b->next_bb;
1581 
1582       if (!(b->flags & BB_REACHABLE))
1583 	{
1584           gimple_stmt_iterator bsi;
1585 
1586           for (bsi = gsi_start_bb (b); !gsi_end_p (bsi); gsi_next (&bsi))
1587 	    {
1588 	      struct cgraph_edge *e;
1589 	      struct cgraph_node *node;
1590 
1591 	      dst_node->remove_stmt_references (gsi_stmt (bsi));
1592 
1593 	      if (gimple_code (gsi_stmt (bsi)) == GIMPLE_CALL
1594 		  &&(e = dst_node->get_edge (gsi_stmt (bsi))) != NULL)
1595 		{
1596 		  if (!e->inline_failed)
1597 		    e->callee->remove_symbol_and_inline_clones (dst_node);
1598 		  else
1599 		    e->remove ();
1600 		}
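	      /* Do the same for every clone of DST_NODE: the loop below
		 walks the clone tree in preorder (children first, then
		 siblings, then back up towards DST_NODE), dropping each
		 clone's statement references and call edges as well.  */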
1601 	      if (update_clones && dst_node->clones)
1602 		for (node = dst_node->clones; node != dst_node;)
1603 		  {
1604 		    node->remove_stmt_references (gsi_stmt (bsi));
1605 		    if (gimple_code (gsi_stmt (bsi)) == GIMPLE_CALL
1606 			&& (e = node->get_edge (gsi_stmt (bsi))) != NULL)
1607 		      {
1608 			if (!e->inline_failed)
1609 			  e->callee->remove_symbol_and_inline_clones (dst_node);
1610 			else
1611 			  e->remove ();
1612 		      }
1613 
1614 		    if (node->clones)
1615 		      node = node->clones;
1616 		    else if (node->next_sibling_clone)
1617 		      node = node->next_sibling_clone;
1618 		    else
1619 		      {
1620 			while (node != dst_node && !node->next_sibling_clone)
1621 			  node = node->clone_of;
1622 			if (node != dst_node)
1623 			  node = node->next_sibling_clone;
1624 		      }
1625 		  }
1626 	    }
1627 	  delete_basic_block (b);
1628 	  changed = true;
1629 	}
1630     }
1631 
1632   return changed;
1633 }
1634 
1635