1 /* Control flow optimization code for GNU compiler.
2    Copyright (C) 1987-2018 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 /* This file contains the control flow optimizer.  The main entry point is
21    cleanup_cfg.  The following optimizations are performed:
22 
23    - Unreachable blocks removal
24    - Edge forwarding (an edge to a forwarder block is redirected to the
25      block's successor.  Simplification of the branch instruction is performed
26      by the underlying infrastructure, so the branch can be converted into a
27      simplejump or eliminated).
28    - Cross jumping (tail merging)
29    - Conditional jump-around-simplejump simplification
30    - Basic block merging.  */
31 
32 #include "config.h"
33 #include "system.h"
34 #include "coretypes.h"
35 #include "backend.h"
36 #include "target.h"
37 #include "rtl.h"
38 #include "tree.h"
39 #include "cfghooks.h"
40 #include "df.h"
41 #include "memmodel.h"
42 #include "tm_p.h"
43 #include "insn-config.h"
44 #include "emit-rtl.h"
45 #include "cselib.h"
46 #include "params.h"
47 #include "tree-pass.h"
48 #include "cfgloop.h"
49 #include "cfgrtl.h"
50 #include "cfganal.h"
51 #include "cfgbuild.h"
52 #include "cfgcleanup.h"
53 #include "dce.h"
54 #include "dbgcnt.h"
55 #include "rtl-iter.h"
56 #include "regs.h"
57 
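/* Nonzero if BB has been marked as a forwarder block, i.e. a block that does
   nothing but forward control flow to its single successor (see
   forwarder_block_p and update_forwarder_flag below).  */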
58 #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
59 
60 /* Set to true when we are running first pass of try_optimize_cfg loop.  */
61 static bool first_pass;
62 
63 /* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
64 static bool crossjumps_occurred;
65 
66 /* Set to true if we couldn't run an optimization due to stale liveness
67    information; we should run df_analyze to enable more opportunities.  */
68 static bool block_was_dirty;
69 
70 static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
71 static bool try_crossjump_bb (int, basic_block);
72 static bool outgoing_edges_match (int, basic_block, basic_block);
73 static enum replace_direction old_insns_match_p (int, rtx_insn *, rtx_insn *);
74 
75 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
76 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
77 static bool try_optimize_cfg (int);
78 static bool try_simplify_condjump (basic_block);
79 static bool try_forward_edges (int, basic_block);
80 static edge thread_jump (edge, basic_block);
81 static bool mark_effect (rtx, bitmap);
82 static void notice_new_block (basic_block);
83 static void update_forwarder_flag (basic_block);
84 static void merge_memattrs (rtx, rtx);
85 
86 /* Set flags for newly created block.  */
87 
88 static void
89 notice_new_block (basic_block bb)
90 {
91   if (!bb)
92     return;
93 
94   if (forwarder_block_p (bb))
95     bb->flags |= BB_FORWARDER_BLOCK;
96 }
97 
98 /* Recompute forwarder flag after block has been modified.  */
99 
100 static void
101 update_forwarder_flag (basic_block bb)
102 {
103   if (forwarder_block_p (bb))
104     bb->flags |= BB_FORWARDER_BLOCK;
105   else
106     bb->flags &= ~BB_FORWARDER_BLOCK;
107 }
108 
109 /* Simplify a conditional jump around an unconditional jump.
110    Return true if something changed.  */
111 
112 static bool
113 try_simplify_condjump (basic_block cbranch_block)
114 {
115   basic_block jump_block, jump_dest_block, cbranch_dest_block;
116   edge cbranch_jump_edge, cbranch_fallthru_edge;
117   rtx_insn *cbranch_insn;
118 
119   /* Verify that there are exactly two successors.  */
120   if (EDGE_COUNT (cbranch_block->succs) != 2)
121     return false;
122 
123   /* Verify that we've got a normal conditional branch at the end
124      of the block.  */
125   cbranch_insn = BB_END (cbranch_block);
126   if (!any_condjump_p (cbranch_insn))
127     return false;
128 
129   cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
130   cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
131 
132   /* The next block must not have multiple predecessors, must not
133      be the last block in the function, and must contain just the
134      unconditional jump.  */
135   jump_block = cbranch_fallthru_edge->dest;
136   if (!single_pred_p (jump_block)
137       || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
138       || !FORWARDER_BLOCK_P (jump_block))
139     return false;
140   jump_dest_block = single_succ (jump_block);
141 
142   /* If we are partitioning hot/cold basic blocks, we don't want to
143      mess up unconditional or indirect jumps that cross between hot
144      and cold sections.
145 
146      Basic block partitioning may result in some jumps that appear to
147      be optimizable (or blocks that appear to be mergeable), but which really
148      must be left untouched (they are required to make it safely across
149      partition boundaries).  See the comments at the top of
150      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
151 
152   if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
153       || (cbranch_jump_edge->flags & EDGE_CROSSING))
154     return false;
155 
156   /* The conditional branch must target the block after the
157      unconditional branch.  */
158   cbranch_dest_block = cbranch_jump_edge->dest;
159 
160   if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
161       || jump_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
162       || !can_fallthru (jump_block, cbranch_dest_block))
163     return false;
164 
165   /* Invert the conditional branch.  */
166   if (!invert_jump (as_a <rtx_jump_insn *> (cbranch_insn),
167 		    block_label (jump_dest_block), 0))
168     return false;
169 
170   if (dump_file)
171     fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
172 	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
173 
174   /* Success.  Update the CFG to match.  Note that after this point
175      the edge variable names appear backwards; the redirection is done
176      this way to preserve edge profile data.  */
177   cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
178 						cbranch_dest_block);
179   cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
180 						    jump_dest_block);
181   cbranch_jump_edge->flags |= EDGE_FALLTHRU;
182   cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
183   update_br_prob_note (cbranch_block);
184 
185   /* Delete the block with the unconditional jump, and clean up the mess.  */
186   delete_basic_block (jump_block);
187   tidy_fallthru_edge (cbranch_jump_edge);
188   update_forwarder_flag (cbranch_block);
189 
190   return true;
191 }
192 
193 /* Attempt to prove that the operation is a NOOP using CSElib, or else mark
194    its effect on a register.  Used by jump threading.  */
195 
196 static bool
197 mark_effect (rtx exp, regset nonequal)
198 {
199   rtx dest;
200   switch (GET_CODE (exp))
201     {
202       /* In case we do clobber the register, mark it as equal, as we know the
203 	 value is dead so it doesn't have to match.  */
204     case CLOBBER:
205       dest = XEXP (exp, 0);
206       if (REG_P (dest))
207 	bitmap_clear_range (nonequal, REGNO (dest), REG_NREGS (dest));
208       return false;
209 
210     case SET:
211       if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
212 	return false;
213       dest = SET_DEST (exp);
214       if (dest == pc_rtx)
215 	return false;
216       if (!REG_P (dest))
217 	return true;
218       bitmap_set_range (nonequal, REGNO (dest), REG_NREGS (dest));
219       return false;
220 
221     default:
222       return false;
223     }
224 }
225 
226 /* Return true if X contains a register in NONEQUAL.  */
227 static bool
228 mentions_nonequal_regs (const_rtx x, regset nonequal)
229 {
230   subrtx_iterator::array_type array;
231   FOR_EACH_SUBRTX (iter, array, x, NONCONST)
232     {
233       const_rtx x = *iter;
234       if (REG_P (x))
235 	{
236 	  unsigned int end_regno = END_REGNO (x);
237 	  for (unsigned int regno = REGNO (x); regno < end_regno; ++regno)
238 	    if (REGNO_REG_SET_P (nonequal, regno))
239 	      return true;
240 	}
241     }
242   return false;
243 }
244 
245 /* Attempt to prove that the basic block B will have no side effects and
246    will always continue along the same edge if reached via E.  Return that
247    edge if it exists, NULL otherwise.  */
248 
249 static edge
250 thread_jump (edge e, basic_block b)
251 {
252   rtx set1, set2, cond1, cond2;
253   rtx_insn *insn;
254   enum rtx_code code1, code2, reversed_code2;
255   bool reverse1 = false;
256   unsigned i;
257   regset nonequal;
258   bool failed = false;
259   reg_set_iterator rsi;
260 
261   if (b->flags & BB_NONTHREADABLE_BLOCK)
262     return NULL;
263 
264   /* At the moment we handle only conditional jumps, but later we may
265      want to extend this code to tablejumps and others.  */
266   if (EDGE_COUNT (e->src->succs) != 2)
267     return NULL;
268   if (EDGE_COUNT (b->succs) != 2)
269     {
270       b->flags |= BB_NONTHREADABLE_BLOCK;
271       return NULL;
272     }
273 
274   /* Second branch must end with onlyjump, as we will eliminate the jump.  */
275   if (!any_condjump_p (BB_END (e->src)))
276     return NULL;
277 
278   if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
279     {
280       b->flags |= BB_NONTHREADABLE_BLOCK;
281       return NULL;
282     }
283 
284   set1 = pc_set (BB_END (e->src));
285   set2 = pc_set (BB_END (b));
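  /* Work out whether the condition tested by SET1 is known to be true or
     false when edge E is taken; if taking E corresponds to the condition
     being false, the comparison code must be reversed below.  */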
286   if (((e->flags & EDGE_FALLTHRU) != 0)
287       != (XEXP (SET_SRC (set1), 1) == pc_rtx))
288     reverse1 = true;
289 
290   cond1 = XEXP (SET_SRC (set1), 0);
291   cond2 = XEXP (SET_SRC (set2), 0);
292   if (reverse1)
293     code1 = reversed_comparison_code (cond1, BB_END (e->src));
294   else
295     code1 = GET_CODE (cond1);
296 
297   code2 = GET_CODE (cond2);
298   reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
299 
300   if (!comparison_dominates_p (code1, code2)
301       && !comparison_dominates_p (code1, reversed_code2))
302     return NULL;
303 
304   /* Ensure that the comparison operators are equivalent.
305      ??? This is far too pessimistic.  We should allow swapped operands,
306      different CCmodes, or for example comparisons for interval, that
307      dominate even when operands are not equivalent.  */
308   if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
309       || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
310     return NULL;
311 
312   /* Short circuit cases where block B contains some side effects, as we can't
313      safely bypass it.  */
314   for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
315        insn = NEXT_INSN (insn))
316     if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
317       {
318 	b->flags |= BB_NONTHREADABLE_BLOCK;
319 	return NULL;
320       }
321 
322   cselib_init (0);
323 
324   /* First process all values computed in the source basic block.  */
325   for (insn = NEXT_INSN (BB_HEAD (e->src));
326        insn != NEXT_INSN (BB_END (e->src));
327        insn = NEXT_INSN (insn))
328     if (INSN_P (insn))
329       cselib_process_insn (insn);
330 
331   nonequal = BITMAP_ALLOC (NULL);
332   CLEAR_REG_SET (nonequal);
333 
334   /* Now assume that we've continued along the edge E to B and continue
335      processing as if it were the same basic block.
336      Our goal is to prove that the whole block is a NOOP.  */
337 
338   for (insn = NEXT_INSN (BB_HEAD (b));
339        insn != NEXT_INSN (BB_END (b)) && !failed;
340        insn = NEXT_INSN (insn))
341     {
342       if (INSN_P (insn))
343 	{
344 	  rtx pat = PATTERN (insn);
345 
346 	  if (GET_CODE (pat) == PARALLEL)
347 	    {
348 	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
349 		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
350 	    }
351 	  else
352 	    failed |= mark_effect (pat, nonequal);
353 	}
354 
355       cselib_process_insn (insn);
356     }
357 
358   /* Later we should clear nonequal of dead registers.  So far we don't
359      have liveness information in cfg_cleanup.  */
360   if (failed)
361     {
362       b->flags |= BB_NONTHREADABLE_BLOCK;
363       goto failed_exit;
364     }
365 
366   /* COND2 must not mention any register whose value is not known to be
367      equal to the value it had in the former block.  */
368   if (mentions_nonequal_regs (cond2, nonequal))
369     goto failed_exit;
370 
371   EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
372     goto failed_exit;
373 
374   BITMAP_FREE (nonequal);
375   cselib_finish ();
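  /* When B is entered via E, its comparison is fully determined: it is true
     iff CODE1 dominates CODE2.  Return the outgoing edge of B taken in that
     case, accounting for whether B's jump branches when its condition is
     false (i.e. the "then" arm of its IF_THEN_ELSE is pc).  */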
376   if ((comparison_dominates_p (code1, code2) != 0)
377       != (XEXP (SET_SRC (set2), 1) == pc_rtx))
378     return BRANCH_EDGE (b);
379   else
380     return FALLTHRU_EDGE (b);
381 
382 failed_exit:
383   BITMAP_FREE (nonequal);
384   cselib_finish ();
385   return NULL;
386 }
387 
388 /* Attempt to forward edges leaving basic block B.
389    Return true if successful.  */
390 
391 static bool
392 try_forward_edges (int mode, basic_block b)
393 {
394   bool changed = false;
395   edge_iterator ei;
396   edge e, *threaded_edges = NULL;
397 
398   for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
399     {
400       basic_block target, first;
401       location_t goto_locus;
402       int counter;
403       bool threaded = false;
404       int nthreaded_edges = 0;
405       bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;
406       bool new_target_threaded = false;
407 
408       /* Skip complex edges because we don't know how to update them.
409 
410 	 Still handle fallthru edges, as we may succeed in forwarding the
411 	 fallthru edge to the same place as the branch edge of a conditional
412 	 branch, turning the conditional branch into an unconditional one.  */
413       if (e->flags & EDGE_COMPLEX)
414 	{
415 	  ei_next (&ei);
416 	  continue;
417 	}
418 
419       target = first = e->dest;
420       counter = NUM_FIXED_BLOCKS;
421       goto_locus = e->goto_locus;
422 
423       while (counter < n_basic_blocks_for_fn (cfun))
424 	{
425 	  basic_block new_target = NULL;
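	  /* Jump threading is only retried on the first pass or when a block
	     on the path has been modified since the previous pass; otherwise
	     thread_jump would likely just repeat work that already failed.  */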
426 	  may_thread |= (target->flags & BB_MODIFIED) != 0;
427 
428 	  if (FORWARDER_BLOCK_P (target)
429 	      && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun))
430 	    {
431 	      /* Bypass trivial infinite loops.  */
432 	      new_target = single_succ (target);
433 	      if (target == new_target)
434 		counter = n_basic_blocks_for_fn (cfun);
435 	      else if (!optimize)
436 		{
437 		  /* When not optimizing, ensure that edges or forwarder
438 		     blocks with different locus are not optimized out.  */
439 		  location_t new_locus = single_succ_edge (target)->goto_locus;
440 		  location_t locus = goto_locus;
441 
442 		  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
443 		      && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
444 		      && new_locus != locus)
445 		    new_target = NULL;
446 		  else
447 		    {
448 		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
449 			locus = new_locus;
450 
451 		      rtx_insn *last = BB_END (target);
452 		      if (DEBUG_INSN_P (last))
453 			last = prev_nondebug_insn (last);
454 		      if (last && INSN_P (last))
455 			new_locus = INSN_LOCATION (last);
456 		      else
457 			new_locus = UNKNOWN_LOCATION;
458 
459 		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
460 			  && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
461 			  && new_locus != locus)
462 			new_target = NULL;
463 		      else
464 			{
465 			  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
466 			    locus = new_locus;
467 
468 			  goto_locus = locus;
469 			}
470 		    }
471 		}
472 	    }
473 
474 	  /* Allow threading over only one edge at a time to simplify updating
475 	     of probabilities.  */
476 	  else if ((mode & CLEANUP_THREADING) && may_thread)
477 	    {
478 	      edge t = thread_jump (e, target);
479 	      if (t)
480 		{
481 		  if (!threaded_edges)
482 		    threaded_edges = XNEWVEC (edge,
483 					      n_basic_blocks_for_fn (cfun));
484 		  else
485 		    {
486 		      int i;
487 
488 		      /* Detect an infinite loop across blocks not
489 			 including the start block.  */
490 		      for (i = 0; i < nthreaded_edges; ++i)
491 			if (threaded_edges[i] == t)
492 			  break;
493 		      if (i < nthreaded_edges)
494 			{
495 			  counter = n_basic_blocks_for_fn (cfun);
496 			  break;
497 			}
498 		    }
499 
500 		  /* Detect an infinite loop across the start block.  */
501 		  if (t->dest == b)
502 		    break;
503 
504 		  gcc_assert (nthreaded_edges
505 			      < (n_basic_blocks_for_fn (cfun)
506 				 - NUM_FIXED_BLOCKS));
507 		  threaded_edges[nthreaded_edges++] = t;
508 
509 		  new_target = t->dest;
510 		  new_target_threaded = true;
511 		}
512 	    }
513 
514 	  if (!new_target)
515 	    break;
516 
517 	  counter++;
518 	  /* Do not turn a non-crossing jump into a crossing one.  Depending on
519 	     the target it may require a different instruction pattern.  */
520 	  if ((e->flags & EDGE_CROSSING)
521 	      || BB_PARTITION (first) == BB_PARTITION (new_target))
522 	    {
523 	      target = new_target;
524 	      threaded |= new_target_threaded;
525 	    }
526 	}
527 
528       if (counter >= n_basic_blocks_for_fn (cfun))
529 	{
530 	  if (dump_file)
531 	    fprintf (dump_file, "Infinite loop in BB %i.\n",
532 		     target->index);
533 	}
534       else if (target == first)
535 	; /* We didn't do anything.  */
536       else
537 	{
538 	  /* Save the values now, as the edge may get removed.  */
539 	  profile_count edge_count = e->count ();
540 	  int n = 0;
541 
542 	  e->goto_locus = goto_locus;
543 
544 	  /* Don't force if target is exit block.  */
545 	  if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun))
546 	    {
547 	      notice_new_block (redirect_edge_and_branch_force (e, target));
548 	      if (dump_file)
549 		fprintf (dump_file, "Conditionals threaded.\n");
550 	    }
551 	  else if (!redirect_edge_and_branch (e, target))
552 	    {
553 	      if (dump_file)
554 		fprintf (dump_file,
555 			 "Forwarding edge %i->%i to %i failed.\n",
556 			 b->index, e->dest->index, target->index);
557 	      ei_next (&ei);
558 	      continue;
559 	    }
560 
561 	  /* We successfully forwarded the edge.  Now update profile
562 	     data: for each edge we traversed in the chain, remove
563 	     the original edge's execution count.  */
564 	  do
565 	    {
566 	      edge t;
567 
568 	      if (!single_succ_p (first))
569 		{
570 		  gcc_assert (n < nthreaded_edges);
571 		  t = threaded_edges [n++];
572 		  gcc_assert (t->src == first);
573 		  update_bb_profile_for_threading (first, edge_count, t);
574 		  update_br_prob_note (first);
575 		}
576 	      else
577 		{
578 		  first->count -= edge_count;
579 		  /* It is possible that as the result of
580 		     threading we've removed the edge, as it
581 		     was threaded to the fallthru edge.  Avoid
582 		     getting out of sync.  */
583 		  if (n < nthreaded_edges
584 		      && first == threaded_edges [n]->src)
585 		    n++;
586 		  t = single_succ_edge (first);
587 		}
588 
589 	      first = t->dest;
590 	    }
591 	  while (first != target);
592 
593 	  changed = true;
594 	  continue;
595 	}
596       ei_next (&ei);
597     }
598 
599   free (threaded_edges);
600   return changed;
601 }
602 
603 
604 /* Blocks A and B are to be merged into a single block.  A has no incoming
605    fallthru edge, so it can be moved before B without adding or modifying
606    any jumps (aside from the jump from A to B).  */
607 
608 static void
609 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
610 {
611   rtx_insn *barrier;
612 
613   /* If we are partitioning hot/cold basic blocks, we don't want to
614      mess up unconditional or indirect jumps that cross between hot
615      and cold sections.
616 
617      Basic block partitioning may result in some jumps that appear to
618      be optimizable (or blocks that appear to be mergeable), but which really
619      must be left untouched (they are required to make it safely across
620      partition boundaries).  See the comments at the top of
621      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
622 
623   if (BB_PARTITION (a) != BB_PARTITION (b))
624     return;
625 
626   barrier = next_nonnote_insn (BB_END (a));
627   gcc_assert (BARRIER_P (barrier));
628   delete_insn (barrier);
629 
630   /* Scramble the insn chain.  */
631   if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
632     reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
633   df_set_bb_dirty (a);
634 
635   if (dump_file)
636     fprintf (dump_file, "Moved block %d before %d and merged.\n",
637 	     a->index, b->index);
638 
639   /* Swap the records for the two blocks around.  */
640 
641   unlink_block (a);
642   link_block (a, b->prev_bb);
643 
644   /* Now blocks A and B are contiguous.  Merge them.  */
645   merge_blocks (a, b);
646 }
647 
648 /* Blocks A and B are to be merged into a single block.  B has no outgoing
649    fallthru edge, so it can be moved after A without adding or modifying
650    any jumps (aside from the jump from A to B).  */
651 
652 static void
653 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
654 {
655   rtx_insn *barrier, *real_b_end;
656   rtx_insn *label;
657   rtx_jump_table_data *table;
658 
659   /* If we are partitioning hot/cold basic blocks, we don't want to
660      mess up unconditional or indirect jumps that cross between hot
661      and cold sections.
662 
663      Basic block partitioning may result in some jumps that appear to
664      be optimizable (or blocks that appear to be mergeable), but which really
665      must be left untouched (they are required to make it safely across
666      partition boundaries).  See the comments at the top of
667      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
668 
669   if (BB_PARTITION (a) != BB_PARTITION (b))
670     return;
671 
672   real_b_end = BB_END (b);
673 
674   /* If there is a jump table following block B, temporarily add the jump
675      table to block B so that it will also be moved to the correct location.  */
676   if (tablejump_p (BB_END (b), &label, &table)
677       && prev_active_insn (label) == BB_END (b))
678     {
679       BB_END (b) = table;
680     }
681 
682   /* There had better have been a barrier there.  Delete it.  */
683   barrier = NEXT_INSN (BB_END (b));
684   if (barrier && BARRIER_P (barrier))
685     delete_insn (barrier);
686 
687 
688   /* Scramble the insn chain.  */
689   reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
690 
691   /* Restore the real end of b.  */
692   BB_END (b) = real_b_end;
693 
694   if (dump_file)
695     fprintf (dump_file, "Moved block %d after %d and merged.\n",
696 	     b->index, a->index);
697 
698   /* Now blocks A and B are contiguous.  Merge them.  */
699   merge_blocks (a, b);
700 }
701 
702 /* Attempt to merge basic blocks that are potentially non-adjacent.
703    Return NULL iff the attempt failed, otherwise return the basic block
704    where cleanup_cfg should continue.  Because the merging commonly
705    moves a basic block away or introduces another optimization
706    possibility, return the basic block just before B so cleanup_cfg
707    doesn't need to iterate.
708 
709    It may be a good idea to return the basic block before C in the case
710    C has been moved after B and originally appeared earlier in the
711    insn sequence, but we have no information available about the
712    relative ordering of these two.  Hopefully it is not too common.  */
713 
714 static basic_block
715 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
716 {
717   basic_block next;
718 
719   /* If we are partitioning hot/cold basic blocks, we don't want to
720      mess up unconditional or indirect jumps that cross between hot
721      and cold sections.
722 
723      Basic block partitioning may result in some jumps that appear to
724      be optimizable (or blocks that appear to be mergeable), but which really
725      must be left untouched (they are required to make it safely across
726      partition boundaries).  See the comments at the top of
727      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
728 
729   if (BB_PARTITION (b) != BB_PARTITION (c))
730     return NULL;
731 
732   /* If B has a fallthru edge to C, no need to move anything.  */
733   if (e->flags & EDGE_FALLTHRU)
734     {
735       int b_index = b->index, c_index = c->index;
736 
737       /* Protect the loop latches.  */
738       if (current_loops && c->loop_father->latch == c)
739 	return NULL;
740 
741       merge_blocks (b, c);
742       update_forwarder_flag (b);
743 
744       if (dump_file)
745 	fprintf (dump_file, "Merged %d and %d without moving.\n",
746 		 b_index, c_index);
747 
748       return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb;
749     }
750 
751   /* Otherwise we will need to move code around.  Do that only if expensive
752      transformations are allowed.  */
753   else if (mode & CLEANUP_EXPENSIVE)
754     {
755       edge tmp_edge, b_fallthru_edge;
756       bool c_has_outgoing_fallthru;
757       bool b_has_incoming_fallthru;
758 
759       /* Avoid overactive code motion, as the forwarder blocks should be
760 	 eliminated by edge redirection instead.  One exception might have
761 	 been if B is a forwarder block and C has no fallthru edge, but
762 	 that should be cleaned up by bb-reorder instead.  */
763       if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
764 	return NULL;
765 
766       /* We must make sure to not munge nesting of lexical blocks,
767 	 and loop notes.  This is done by squeezing out all the notes
768 	 and leaving them there to lie.  Not ideal, but functional.  */
769 
770       tmp_edge = find_fallthru_edge (c->succs);
771       c_has_outgoing_fallthru = (tmp_edge != NULL);
772 
773       tmp_edge = find_fallthru_edge (b->preds);
774       b_has_incoming_fallthru = (tmp_edge != NULL);
775       b_fallthru_edge = tmp_edge;
776       next = b->prev_bb;
777       if (next == c)
778 	next = next->prev_bb;
779 
780       /* Otherwise, we're going to try to move C after B.  If C does
781 	 not have an outgoing fallthru, then it can be moved
782 	 immediately after B without introducing or modifying jumps.  */
783       if (! c_has_outgoing_fallthru)
784 	{
785 	  merge_blocks_move_successor_nojumps (b, c);
786 	  return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
787 	}
788 
789       /* If B does not have an incoming fallthru, then it can be moved
790 	 immediately before C without introducing or modifying jumps.
791 	 C cannot be the first block, so we do not have to worry about
792 	 accessing a non-existent block.  */
793 
794       if (b_has_incoming_fallthru)
795 	{
796 	  basic_block bb;
797 
798 	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
799 	    return NULL;
800 	  bb = force_nonfallthru (b_fallthru_edge);
801 	  if (bb)
802 	    notice_new_block (bb);
803 	}
804 
805       merge_blocks_move_predecessor_nojumps (b, c);
806       return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
807     }
808 
809   return NULL;
810 }
811 
812 
813 /* Remove from the MEM expressions in X and Y any memory attributes
814    that are not equal in the two.  */
815 
816 static void
817 merge_memattrs (rtx x, rtx y)
818 {
819   int i;
820   int j;
821   enum rtx_code code;
822   const char *fmt;
823 
824   if (x == y)
825     return;
826   if (x == 0 || y == 0)
827     return;
828 
829   code = GET_CODE (x);
830 
831   if (code != GET_CODE (y))
832     return;
833 
834   if (GET_MODE (x) != GET_MODE (y))
835     return;
836 
837   if (code == MEM && !mem_attrs_eq_p (MEM_ATTRS (x), MEM_ATTRS (y)))
838     {
839       if (! MEM_ATTRS (x))
840 	MEM_ATTRS (y) = 0;
841       else if (! MEM_ATTRS (y))
842 	MEM_ATTRS (x) = 0;
843       else
844 	{
845 	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
846 	    {
847 	      set_mem_alias_set (x, 0);
848 	      set_mem_alias_set (y, 0);
849 	    }
850 
851 	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
852 	    {
853 	      set_mem_expr (x, 0);
854 	      set_mem_expr (y, 0);
855 	      clear_mem_offset (x);
856 	      clear_mem_offset (y);
857 	    }
858 	  else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
859 		   || (MEM_OFFSET_KNOWN_P (x)
860 		       && maybe_ne (MEM_OFFSET (x), MEM_OFFSET (y))))
861 	    {
862 	      clear_mem_offset (x);
863 	      clear_mem_offset (y);
864 	    }
865 
866 	  if (!MEM_SIZE_KNOWN_P (x))
867 	    clear_mem_size (y);
868 	  else if (!MEM_SIZE_KNOWN_P (y))
869 	    clear_mem_size (x);
870 	  else if (known_le (MEM_SIZE (x), MEM_SIZE (y)))
871 	    set_mem_size (x, MEM_SIZE (y));
872 	  else if (known_le (MEM_SIZE (y), MEM_SIZE (x)))
873 	    set_mem_size (y, MEM_SIZE (x));
874 	  else
875 	    {
876 	      /* The sizes aren't ordered, so we can't merge them.  */
877 	      clear_mem_size (x);
878 	      clear_mem_size (y);
879 	    }
880 
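	  /* The merged reference can only be assumed to have the weaker of
	     the two alignments.  */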
881 	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
882 	  set_mem_align (y, MEM_ALIGN (x));
883 	}
884     }
885   if (code == MEM)
886     {
887       if (MEM_READONLY_P (x) != MEM_READONLY_P (y))
888 	{
889 	  MEM_READONLY_P (x) = 0;
890 	  MEM_READONLY_P (y) = 0;
891 	}
892       if (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y))
893 	{
894 	  MEM_NOTRAP_P (x) = 0;
895 	  MEM_NOTRAP_P (y) = 0;
896 	}
897       if (MEM_VOLATILE_P (x) != MEM_VOLATILE_P (y))
898 	{
899 	  MEM_VOLATILE_P (x) = 1;
900 	  MEM_VOLATILE_P (y) = 1;
901 	}
902     }
903 
904   fmt = GET_RTX_FORMAT (code);
905   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
906     {
907       switch (fmt[i])
908 	{
909 	case 'E':
910 	  /* Two vectors must have the same length.  */
911 	  if (XVECLEN (x, i) != XVECLEN (y, i))
912 	    return;
913 
914 	  for (j = 0; j < XVECLEN (x, i); j++)
915 	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
916 
917 	  break;
918 
919 	case 'e':
920 	  merge_memattrs (XEXP (x, i), XEXP (y, i));
921 	}
922     }
923   return;
924 }
925 
926 
927  /* Checks if patterns P1 and P2 are equivalent, apart from the possibly
928     different single sets S1 and S2.  */
929 
930 static bool
931 equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
932 {
933   int i;
934   rtx e1, e2;
935 
936   if (p1 == s1 && p2 == s2)
937     return true;
938 
939   if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
940     return false;
941 
942   if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
943     return false;
944 
945   for (i = 0; i < XVECLEN (p1, 0); i++)
946     {
947       e1 = XVECEXP (p1, 0, i);
948       e2 = XVECEXP (p2, 0, i);
949       if (e1 == s1 && e2 == s2)
950         continue;
951       if (reload_completed
952           ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
953         continue;
954 
955       return false;
956     }
957 
958   return true;
959 }
960 
961 
962 /* NOTE1 is the REG_EQUAL note, if any, attached to an insn
963    that is a single_set with a SET_SRC of SRC1.  Similarly
964    for NOTE2/SRC2.
965 
966    So effectively NOTE1/NOTE2 are an alternate form of
967    SRC1/SRC2 respectively.
968 
969    Return nonzero if SRC1 or NOTE1 has the same constant
970    integer value as SRC2 or NOTE2.   Else return zero.  */
971 static int
972 values_equal_p (rtx note1, rtx note2, rtx src1, rtx src2)
973 {
974   if (note1
975       && note2
976       && CONST_INT_P (XEXP (note1, 0))
977       && rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0)))
978     return 1;
979 
980   if (!note1
981       && !note2
982       && CONST_INT_P (src1)
983       && CONST_INT_P (src2)
984       && rtx_equal_p (src1, src2))
985     return 1;
986 
987   if (note1
988       && CONST_INT_P (src2)
989       && rtx_equal_p (XEXP (note1, 0), src2))
990     return 1;
991 
992   if (note2
993       && CONST_INT_P (src1)
994       && rtx_equal_p (XEXP (note2, 0), src1))
995     return 1;
996 
997   return 0;
998 }
999 
1000 /* Examine register notes on I1 and I2 and return:
1001    - dir_forward if I1 can be replaced by I2, or
1002    - dir_backward if I2 can be replaced by I1, or
1003    - dir_both if both are the case.  */
1004 
1005 static enum replace_direction
1006 can_replace_by (rtx_insn *i1, rtx_insn *i2)
1007 {
1008   rtx s1, s2, d1, d2, src1, src2, note1, note2;
1009   bool c1, c2;
1010 
1011   /* Check for 2 sets.  */
1012   s1 = single_set (i1);
1013   s2 = single_set (i2);
1014   if (s1 == NULL_RTX || s2 == NULL_RTX)
1015     return dir_none;
1016 
1017   /* Check that the 2 sets set the same dest.  */
1018   d1 = SET_DEST (s1);
1019   d2 = SET_DEST (s2);
1020   if (!(reload_completed
1021         ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
1022     return dir_none;
1023 
1024   /* Find identical reg_equiv or reg_equal note, which implies that the 2 sets
1025      set dest to the same value.  */
1026   note1 = find_reg_equal_equiv_note (i1);
1027   note2 = find_reg_equal_equiv_note (i2);
1028 
1029   src1 = SET_SRC (s1);
1030   src2 = SET_SRC (s2);
1031 
1032   if (!values_equal_p (note1, note2, src1, src2))
1033     return dir_none;
1034 
1035   if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
1036     return dir_none;
1037 
1038   /* Although the 2 sets set dest to the same value, we cannot replace
1039        (set (dest) (const_int))
1040      by
1041        (set (dest) (reg))
1042      because we don't know if the reg is live and has the same value at the
1043      location of replacement.  */
1044   c1 = CONST_INT_P (src1);
1045   c2 = CONST_INT_P (src2);
1046   if (c1 && c2)
1047     return dir_both;
1048   else if (c2)
1049     return dir_forward;
1050   else if (c1)
1051     return dir_backward;
1052 
1053   return dir_none;
1054 }
1055 
1056 /* Merges directions A and B.  */
1057 
1058 static enum replace_direction
1059 merge_dir (enum replace_direction a, enum replace_direction b)
1060 {
1061   /* Implements the following table:
1062         |bo fw bw no
1063      ---+-----------
1064      bo |bo fw bw no
1065      fw |-- fw no no
1066      bw |-- -- bw no
1067      no |-- -- -- no.  */
1068 
1069   if (a == b)
1070     return a;
1071 
1072   if (a == dir_both)
1073     return b;
1074   if (b == dir_both)
1075     return a;
1076 
1077   return dir_none;
1078 }
1079 
1080 /* Array of flags indexed by reg note kind, true if the given
1081    reg note is CFA related.  */
1082 static const bool reg_note_cfa_p[] = {
1083 #undef REG_CFA_NOTE
1084 #define DEF_REG_NOTE(NAME) false,
1085 #define REG_CFA_NOTE(NAME) true,
1086 #include "reg-notes.def"
1087 #undef REG_CFA_NOTE
1088 #undef DEF_REG_NOTE
1089   false
1090 };
1091 
1092 /* Return true if I1 and I2 have identical CFA notes (the same order
1093    and equivalent content).  */
1094 
1095 static bool
1096 insns_have_identical_cfa_notes (rtx_insn *i1, rtx_insn *i2)
1097 {
1098   rtx n1, n2;
1099   for (n1 = REG_NOTES (i1), n2 = REG_NOTES (i2); ;
1100        n1 = XEXP (n1, 1), n2 = XEXP (n2, 1))
1101     {
1102       /* Skip over reg notes not related to CFI information.  */
1103       while (n1 && !reg_note_cfa_p[REG_NOTE_KIND (n1)])
1104 	n1 = XEXP (n1, 1);
1105       while (n2 && !reg_note_cfa_p[REG_NOTE_KIND (n2)])
1106 	n2 = XEXP (n2, 1);
1107       if (n1 == NULL_RTX && n2 == NULL_RTX)
1108 	return true;
1109       if (n1 == NULL_RTX || n2 == NULL_RTX)
1110 	return false;
1111       if (XEXP (n1, 0) == XEXP (n2, 0))
1112 	;
1113       else if (XEXP (n1, 0) == NULL_RTX || XEXP (n2, 0) == NULL_RTX)
1114 	return false;
1115       else if (!(reload_completed
1116 		 ? rtx_renumbered_equal_p (XEXP (n1, 0), XEXP (n2, 0))
1117 		 : rtx_equal_p (XEXP (n1, 0), XEXP (n2, 0))))
1118 	return false;
1119     }
1120 }
1121 
1122 /* Examine I1 and I2 and return:
1123    - dir_forward if I1 can be replaced by I2, or
1124    - dir_backward if I2 can be replaced by I1, or
1125    - dir_both if both are the case.  */
1126 
1127 static enum replace_direction
1128 old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx_insn *i1, rtx_insn *i2)
1129 {
1130   rtx p1, p2;
1131 
1132   /* Verify that I1 and I2 are equivalent.  */
1133   if (GET_CODE (i1) != GET_CODE (i2))
1134     return dir_none;
1135 
1136   /* __builtin_unreachable() may lead to empty blocks (ending with
1137      NOTE_INSN_BASIC_BLOCK).  They may be crossjumped. */
1138   if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
1139     return dir_both;
1140 
1141   /* ??? Do not allow cross-jumping between different stack levels.  */
1142   p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
1143   p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
1144   if (p1 && p2)
1145     {
1146       p1 = XEXP (p1, 0);
1147       p2 = XEXP (p2, 0);
1148       if (!rtx_equal_p (p1, p2))
1149         return dir_none;
1150 
1151       /* ??? Worse, this adjustment had better be constant lest we
1152          have differing incoming stack levels.  */
1153       if (!frame_pointer_needed
1154 	  && known_eq (find_args_size_adjust (i1), HOST_WIDE_INT_MIN))
1155 	return dir_none;
1156     }
1157   else if (p1 || p2)
1158     return dir_none;
1159 
1160   /* Do not allow cross-jumping between frame related insns and other
1161      insns.  */
1162   if (RTX_FRAME_RELATED_P (i1) != RTX_FRAME_RELATED_P (i2))
1163     return dir_none;
1164 
1165   p1 = PATTERN (i1);
1166   p2 = PATTERN (i2);
1167 
1168   if (GET_CODE (p1) != GET_CODE (p2))
1169     return dir_none;
1170 
1171   /* If this is a CALL_INSN, compare register usage information.
1172      If we don't check this on stack register machines, the two
1173      CALL_INSNs might be merged leaving reg-stack.c with mismatching
1174      numbers of stack registers in the same basic block.
1175      If we don't check this on machines with delay slots, a delay slot may
1176      be filled that clobbers a parameter expected by the subroutine.
1177 
1178      ??? We take the simple route for now and assume that if they're
1179      equal, they were constructed identically.
1180 
1181      Also check for identical exception regions.  */
1182 
1183   if (CALL_P (i1))
1184     {
1185       /* Ensure the same EH region.  */
1186       rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
1187       rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);
1188 
1189       if (!n1 && n2)
1190 	return dir_none;
1191 
1192       if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1193 	return dir_none;
1194 
1195       if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
1196 			CALL_INSN_FUNCTION_USAGE (i2))
1197 	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
1198 	return dir_none;
1199 
1200       /* For address sanitizer, never crossjump __asan_report_* builtins,
1201 	 otherwise errors might be reported on incorrect lines.  */
1202       if (flag_sanitize & SANITIZE_ADDRESS)
1203 	{
1204 	  rtx call = get_call_rtx_from (i1);
1205 	  if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
1206 	    {
1207 	      rtx symbol = XEXP (XEXP (call, 0), 0);
1208 	      if (SYMBOL_REF_DECL (symbol)
1209 		  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
1210 		{
1211 		  if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
1212 		       == BUILT_IN_NORMAL)
1213 		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
1214 			 >= BUILT_IN_ASAN_REPORT_LOAD1
1215 		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
1216 			 <= BUILT_IN_ASAN_STOREN)
1217 		    return dir_none;
1218 		}
1219 	    }
1220 	}
1221 
1222       HARD_REG_SET i1_used, i2_used;
1223 
1224       get_call_reg_set_usage (i1, &i1_used, call_used_reg_set);
1225       get_call_reg_set_usage (i2, &i2_used, call_used_reg_set);
1226 
1227       if (!hard_reg_set_equal_p (i1_used, i2_used))
1228         return dir_none;
1229     }
1230 
1231   /* If both i1 and i2 are frame related, verify all the CFA notes
1232      in the same order and with the same content.  */
1233   if (RTX_FRAME_RELATED_P (i1) && !insns_have_identical_cfa_notes (i1, i2))
1234     return dir_none;
1235 
1236 #ifdef STACK_REGS
1237   /* If cross_jump_death_matters is not 0, the insn's mode
1238      indicates whether or not the insn contains any stack-like
1239      regs.  */
1240 
1241   if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1242     {
1243       /* If register stack conversion has already been done, then
1244 	 death notes must also be compared before it is certain that
1245 	 the two instruction streams match.  */
1246 
1247       rtx note;
1248       HARD_REG_SET i1_regset, i2_regset;
1249 
1250       CLEAR_HARD_REG_SET (i1_regset);
1251       CLEAR_HARD_REG_SET (i2_regset);
1252 
1253       for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1254 	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1255 	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1256 
1257       for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1258 	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1259 	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1260 
1261       if (!hard_reg_set_equal_p (i1_regset, i2_regset))
1262 	return dir_none;
1263     }
1264 #endif
1265 
1266   if (reload_completed
1267       ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1268     return dir_both;
1269 
1270   return can_replace_by (i1, i2);
1271 }
1272 
1273 /* When comparing insns I1 and I2 in flow_find_cross_jump or
1274    flow_find_head_matching_sequence, ensure the notes match.  */
1275 
1276 static void
1277 merge_notes (rtx_insn *i1, rtx_insn *i2)
1278 {
1279   /* If the merged insns have different REG_EQUAL notes, then
1280      remove them.  */
1281   rtx equiv1 = find_reg_equal_equiv_note (i1);
1282   rtx equiv2 = find_reg_equal_equiv_note (i2);
1283 
1284   if (equiv1 && !equiv2)
1285     remove_note (i1, equiv1);
1286   else if (!equiv1 && equiv2)
1287     remove_note (i2, equiv2);
1288   else if (equiv1 && equiv2
1289 	   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1290     {
1291       remove_note (i1, equiv1);
1292       remove_note (i2, equiv2);
1293     }
1294 }
1295 
1296  /* Walk backward from I1 in BB1 to the previous non-debug insn, and return
1297     the resulting insn in *I1 and the corresponding bb in *BB1.  At the head
1298     of a bb, if there is a predecessor bb that reaches this bb via fallthru
1299     and FOLLOW_FALLTHRU is set, walk further into the predecessor bb and
1300     record this in *DID_FALLTHRU.  Otherwise, stop at the head of the bb.  */
1301 
1302 static void
1303 walk_to_nondebug_insn (rtx_insn **i1, basic_block *bb1, bool follow_fallthru,
1304                        bool *did_fallthru)
1305 {
1306   edge fallthru;
1307 
1308   *did_fallthru = false;
1309 
1310   /* Skip notes and debug insns.  */
1311   while (!NONDEBUG_INSN_P (*i1))
1312     {
1313       if (*i1 != BB_HEAD (*bb1))
1314         {
1315           *i1 = PREV_INSN (*i1);
1316           continue;
1317         }
1318 
1319       if (!follow_fallthru)
1320         return;
1321 
1322       fallthru = find_fallthru_edge ((*bb1)->preds);
1323       if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1324           || !single_succ_p (fallthru->src))
1325         return;
1326 
1327       *bb1 = fallthru->src;
1328       *i1 = BB_END (*bb1);
1329       *did_fallthru = true;
1330      }
1331 }
1332 
1333 /* Look through the insns at the end of BB1 and BB2 and find the longest
1334    sequence of insns that are either equivalent, or allow forward or backward
1335    replacement.  Store the first insns for that sequence in *F1 and *F2 and
1336    return the sequence length.
1337 
1338    DIR_P indicates the allowed replacement direction on function entry, and
1339    the actual replacement direction on function exit.  If NULL, only equivalent
1340    sequences are allowed.
1341 
1342    To simplify callers of this function, if the blocks match exactly,
1343    store the head of the blocks in *F1 and *F2.  */
1344 
1345 int
1346 flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx_insn **f1,
1347 		      rtx_insn **f2, enum replace_direction *dir_p)
1348 {
1349   rtx_insn *i1, *i2, *last1, *last2, *afterlast1, *afterlast2;
1350   int ninsns = 0;
1351   enum replace_direction dir, last_dir, afterlast_dir;
1352   bool follow_fallthru, did_fallthru;
1353 
1354   if (dir_p)
1355     dir = *dir_p;
1356   else
1357     dir = dir_both;
1358   afterlast_dir = dir;
1359   last_dir = afterlast_dir;
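  /* LAST_DIR/AFTERLAST_DIR track the replacement direction for the most
     recently matched insn pair and for the pair matched before it, so that
     the last match can be dropped again (see the cc0 handling below).  */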
1360 
1361   /* Skip simple jumps at the end of the blocks.  Complex jumps still
1362      need to be compared for equivalence, which we'll do below.  */
1363 
1364   i1 = BB_END (bb1);
1365   last1 = afterlast1 = last2 = afterlast2 = NULL;
1366   if (onlyjump_p (i1)
1367       || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1368     {
1369       last1 = i1;
1370       i1 = PREV_INSN (i1);
1371     }
1372 
1373   i2 = BB_END (bb2);
1374   if (onlyjump_p (i2)
1375       || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1376     {
1377       last2 = i2;
1378       /* Count everything except for an unconditional jump as an insn.
1379 	 Don't count any jumps if dir_p is NULL.  */
1380       if (!simplejump_p (i2) && !returnjump_p (i2) && last1 && dir_p)
1381 	ninsns++;
1382       i2 = PREV_INSN (i2);
1383     }
1384 
1385   while (true)
1386     {
1387       /* In the following example, we can replace all jumps to C by jumps to A.
1388 
1389          This removes 4 duplicate insns.
1390          [bb A] insn1            [bb C] insn1
1391                 insn2                   insn2
1392          [bb B] insn3                   insn3
1393                 insn4                   insn4
1394                 jump_insn               jump_insn
1395 
1396          We could also replace all jumps to A by jumps to C, but that leaves B
1397          alive, and removes only 2 duplicate insns.  In a subsequent crossjump
1398          step, all jumps to B would be replaced with jumps to the middle of C,
1399          achieving the same result with more effort.
1400          So we allow only the first possibility, which means that we don't allow
1401          fallthru in the block that's being replaced.  */
1402 
1403       follow_fallthru = dir_p && dir != dir_forward;
1404       walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
1405       if (did_fallthru)
1406         dir = dir_backward;
1407 
1408       follow_fallthru = dir_p && dir != dir_backward;
1409       walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
1410       if (did_fallthru)
1411         dir = dir_forward;
1412 
1413       if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1414 	break;
1415 
1416       /* Do not turn a crossing edge into a non-crossing one, or vice versa,
1417 	 after reload.  */
1418       if (BB_PARTITION (BLOCK_FOR_INSN (i1))
1419 	  != BB_PARTITION (BLOCK_FOR_INSN (i2))
1420 	  && reload_completed)
1421 	break;
1422 
1423       dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
1424       if (dir == dir_none || (!dir_p && dir != dir_both))
1425 	break;
1426 
1427       merge_memattrs (i1, i2);
1428 
1429       /* Don't begin a cross-jump with a NOTE insn.  */
1430       if (INSN_P (i1))
1431 	{
1432 	  merge_notes (i1, i2);
1433 
1434 	  afterlast1 = last1, afterlast2 = last2;
1435 	  last1 = i1, last2 = i2;
1436 	  afterlast_dir = last_dir;
1437 	  last_dir = dir;
1438 	  if (active_insn_p (i1))
1439 	    ninsns++;
1440 	}
1441 
1442       i1 = PREV_INSN (i1);
1443       i2 = PREV_INSN (i2);
1444     }
1445 
1446   /* Don't allow the insn after a compare to be shared by
1447      cross-jumping unless the compare is also shared.  */
1448   if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
1449       && ! sets_cc0_p (last1))
1450     last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;
1451 
1452   /* Include preceding notes and labels in the cross-jump.  One,
1453      this may bring us to the head of the blocks as requested above.
1454      Two, it keeps line number notes as matched as may be.  */
1455   if (ninsns)
1456     {
1457       bb1 = BLOCK_FOR_INSN (last1);
1458       while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
1459 	last1 = PREV_INSN (last1);
1460 
1461       if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1462 	last1 = PREV_INSN (last1);
1463 
1464       bb2 = BLOCK_FOR_INSN (last2);
1465       while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
1466 	last2 = PREV_INSN (last2);
1467 
1468       if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1469 	last2 = PREV_INSN (last2);
1470 
1471       *f1 = last1;
1472       *f2 = last2;
1473     }
1474 
1475   if (dir_p)
1476     *dir_p = last_dir;
1477   return ninsns;
1478 }
1479 
1480 /* Like flow_find_cross_jump, except start looking for a matching sequence from
1481    the head of the two blocks.  Do not include jumps at the end.
1482    If STOP_AFTER is nonzero, stop after finding that many matching
1483    instructions.  If STOP_AFTER is zero, count all INSN_P insns, if it is
1484    instructions.  If STOP_AFTER is zero, count all INSN_P insns; if it is
1485    nonzero, count only active insns.  */
1486 int
1487 flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx_insn **f1,
1488 				  rtx_insn **f2, int stop_after)
1489 {
1490   rtx_insn *i1, *i2, *last1, *last2, *beforelast1, *beforelast2;
1491   int ninsns = 0;
1492   edge e;
1493   edge_iterator ei;
1494   int nehedges1 = 0, nehedges2 = 0;
1495 
1496   FOR_EACH_EDGE (e, ei, bb1->succs)
1497     if (e->flags & EDGE_EH)
1498       nehedges1++;
1499   FOR_EACH_EDGE (e, ei, bb2->succs)
1500     if (e->flags & EDGE_EH)
1501       nehedges2++;
1502 
1503   i1 = BB_HEAD (bb1);
1504   i2 = BB_HEAD (bb2);
1505   last1 = beforelast1 = last2 = beforelast2 = NULL;
1506 
1507   while (true)
1508     {
1509       /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG.  */
1510       while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
1511 	{
1512 	  if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
1513 	    break;
1514 	  i1 = NEXT_INSN (i1);
1515 	}
1516 
1517       while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
1518 	{
1519 	  if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
1520 	    break;
1521 	  i2 = NEXT_INSN (i2);
1522 	}
1523 
1524       if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
1525 	  || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
1526 	break;
1527 
1528       if (NOTE_P (i1) || NOTE_P (i2)
1529 	  || JUMP_P (i1) || JUMP_P (i2))
1530 	break;
1531 
1532       /* A sanity check to make sure we're not merging insns with different
1533 	 effects on EH.  If only one of them ends a basic block, it shouldn't
1534 	 have an EH edge; if both end a basic block, there should be the same
1535 	 number of EH edges.  */
1536       if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
1537 	   && nehedges1 > 0)
1538 	  || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
1539 	      && nehedges2 > 0)
1540 	  || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
1541 	      && nehedges1 != nehedges2))
1542 	break;
1543 
1544       if (old_insns_match_p (0, i1, i2) != dir_both)
1545 	break;
1546 
1547       merge_memattrs (i1, i2);
1548 
1549       /* Don't begin a cross-jump with a NOTE insn.  */
1550       if (INSN_P (i1))
1551 	{
1552 	  merge_notes (i1, i2);
1553 
1554 	  beforelast1 = last1, beforelast2 = last2;
1555 	  last1 = i1, last2 = i2;
1556 	  if (!stop_after || active_insn_p (i1))
1557 	    ninsns++;
1558 	}
1559 
1560       if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
1561 	  || (stop_after > 0 && ninsns == stop_after))
1562 	break;
1563 
1564       i1 = NEXT_INSN (i1);
1565       i2 = NEXT_INSN (i2);
1566     }
1567 
1568   /* Don't allow a compare to be shared by cross-jumping unless the insn
1569      after the compare is also shared.  */
1570   if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
1571       && sets_cc0_p (last1))
1572     last1 = beforelast1, last2 = beforelast2, ninsns--;
1573 
1574   if (ninsns)
1575     {
1576       *f1 = last1;
1577       *f2 = last2;
1578     }
1579 
1580   return ninsns;
1581 }
1582 
1583 /* Return true iff outgoing edges of BB1 and BB2 match, together with
1584    the branch instruction.  This means that if we commonize the control
1585    flow before the end of the basic block, the semantics remain unchanged.
1586 
1587    We may assume that there exists one edge with a common destination.  */
1588 
1589 static bool
1590 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1591 {
1592   int nehedges1 = 0, nehedges2 = 0;
1593   edge fallthru1 = 0, fallthru2 = 0;
1594   edge e1, e2;
1595   edge_iterator ei;
1596 
1597   /* If we performed shrink-wrapping, edges to the exit block can
1598      only be distinguished for JUMP_INSNs.  The two paths may differ in
1599      whether they went through the prologue.  Sibcalls are fine, we know
1600      that we either didn't need or inserted an epilogue before them.  */
1601   if (crtl->shrink_wrapped
1602       && single_succ_p (bb1)
1603       && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun)
1604       && (!JUMP_P (BB_END (bb1))
1605 	  /* Punt if the only successor is a fake edge to exit; the jump
1606 	     must be some weird one.  */
1607 	  || (single_succ_edge (bb1)->flags & EDGE_FAKE) != 0)
1608       && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
1609     return false;
1610 
1611   /* If BB1 has only one successor, we may be looking at either an
1612      unconditional jump, or a fake edge to exit.  */
1613   if (single_succ_p (bb1)
1614       && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1615       && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1616     return (single_succ_p (bb2)
1617 	    && (single_succ_edge (bb2)->flags
1618 		& (EDGE_COMPLEX | EDGE_FAKE)) == 0
1619 	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1620 
1621   /* Match conditional jumps - this may get tricky when fallthru and branch
1622      edges are crossed.  */
1623   if (EDGE_COUNT (bb1->succs) == 2
1624       && any_condjump_p (BB_END (bb1))
1625       && onlyjump_p (BB_END (bb1)))
1626     {
1627       edge b1, f1, b2, f2;
1628       bool reverse, match;
1629       rtx set1, set2, cond1, cond2;
1630       enum rtx_code code1, code2;
1631 
1632       if (EDGE_COUNT (bb2->succs) != 2
1633 	  || !any_condjump_p (BB_END (bb2))
1634 	  || !onlyjump_p (BB_END (bb2)))
1635 	return false;
1636 
1637       b1 = BRANCH_EDGE (bb1);
1638       b2 = BRANCH_EDGE (bb2);
1639       f1 = FALLTHRU_EDGE (bb1);
1640       f2 = FALLTHRU_EDGE (bb2);
1641 
1642       /* Get around possible forwarders on fallthru edges.  Other cases
1643 	 should be optimized out already.  */
1644       if (FORWARDER_BLOCK_P (f1->dest))
1645 	f1 = single_succ_edge (f1->dest);
1646 
1647       if (FORWARDER_BLOCK_P (f2->dest))
1648 	f2 = single_succ_edge (f2->dest);
1649 
1650       /* To simplify use of this function, return false if there are
1651 	 unneeded forwarder blocks.  These will get eliminated later
1652 	 during cleanup_cfg.  */
1653       if (FORWARDER_BLOCK_P (f1->dest)
1654 	  || FORWARDER_BLOCK_P (f2->dest)
1655 	  || FORWARDER_BLOCK_P (b1->dest)
1656 	  || FORWARDER_BLOCK_P (b2->dest))
1657 	return false;
1658 
1659       if (f1->dest == f2->dest && b1->dest == b2->dest)
1660 	reverse = false;
1661       else if (f1->dest == b2->dest && b1->dest == f2->dest)
1662 	reverse = true;
1663       else
1664 	return false;
1665 
1666       set1 = pc_set (BB_END (bb1));
1667       set2 = pc_set (BB_END (bb2));
1668       if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1669 	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1670 	reverse = !reverse;
1671 
1672       cond1 = XEXP (SET_SRC (set1), 0);
1673       cond2 = XEXP (SET_SRC (set2), 0);
1674       code1 = GET_CODE (cond1);
1675       if (reverse)
1676 	code2 = reversed_comparison_code (cond2, BB_END (bb2));
1677       else
1678 	code2 = GET_CODE (cond2);
1679 
1680       if (code2 == UNKNOWN)
1681 	return false;
1682 
1683       /* Verify codes and operands match.  */
1684       match = ((code1 == code2
1685 		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1686 		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1687 	       || (code1 == swap_condition (code2)
1688 		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
1689 					      XEXP (cond2, 0))
1690 		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
1691 					      XEXP (cond2, 1))));
1692 
1693       /* If we return true, we will join the blocks.  Which means that
1694 	 we will only have one branch prediction bit to work with.  Thus
1695 	 we require the existing branches to have probabilities that are
1696 	 roughly similar.  */
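      /* For example (an illustrative sketch): a branch taken ~90% of the
	 time in BB1 but only ~10% of the time in BB2 is rejected here,
	 while two branches both taken ~60% of the time are accepted.  */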
1697       if (match
1698 	  && optimize_bb_for_speed_p (bb1)
1699 	  && optimize_bb_for_speed_p (bb2))
1700 	{
1701 	  profile_probability prob2;
1702 
1703 	  if (b1->dest == b2->dest)
1704 	    prob2 = b2->probability;
1705 	  else
1706 	    /* Do not use f2 probability as f2 may be forwarded.  */
1707 	    prob2 = b2->probability.invert ();
1708 
1709 	  /* Fail if the difference in probabilities is greater than 50%.
1710 	     This rules out two well-predicted branches with opposite
1711 	     outcomes.  */
1712 	  if (b1->probability.differs_lot_from_p (prob2))
1713 	    {
1714 	      if (dump_file)
1715 		{
1716 		  fprintf (dump_file,
1717 			   "Outcomes of branch in bb %i and %i differ too"
1718 			   " much (", bb1->index, bb2->index);
1719 		  b1->probability.dump (dump_file);
1720 		  prob2.dump (dump_file);
1721 		  fprintf (dump_file, ")\n");
1722 		}
1723 	      return false;
1724 	    }
1725 	}
1726 
1727       if (dump_file && match)
1728 	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1729 		 bb1->index, bb2->index);
1730 
1731       return match;
1732     }
1733 
1734   /* Generic case - we are seeing a computed jump, table jump or trapping
1735      instruction.  */
1736 
1737   /* Check whether there are tablejumps in the end of BB1 and BB2.
1738      Return true if they are identical.  */
1739     {
1740       rtx_insn *label1, *label2;
1741       rtx_jump_table_data *table1, *table2;
1742 
1743       if (tablejump_p (BB_END (bb1), &label1, &table1)
1744 	  && tablejump_p (BB_END (bb2), &label2, &table2)
1745 	  && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1746 	{
1747 	  /* The labels should never be the same rtx.  If they really are the same
1748 	     the jump tables are the same too.  So disable crossjumping of blocks BB1
1749 	     and BB2 because when deleting the common insns at the end of BB1
1750 	     by delete_basic_block () the jump table would be deleted too.  */
1751 	  /* If LABEL2 is referenced in BB1->END do not do anything
1752 	     because we would lose information when replacing
1753 	     LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
1754 	  if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1755 	    {
1756 	      /* Set IDENTICAL to true when the tables are identical.  */
1757 	      bool identical = false;
1758 	      rtx p1, p2;
1759 
1760 	      p1 = PATTERN (table1);
1761 	      p2 = PATTERN (table2);
1762 	      if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1763 		{
1764 		  identical = true;
1765 		}
1766 	      else if (GET_CODE (p1) == ADDR_DIFF_VEC
1767 		       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1768 		       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1769 		       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1770 		{
1771 		  int i;
1772 
1773 		  identical = true;
1774 		  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1775 		    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1776 		      identical = false;
1777 		}
1778 
1779 	      if (identical)
1780 		{
1781 		  bool match;
1782 
1783 		  /* Temporarily replace references to LABEL1 with LABEL2
1784 		     in BB1->END so that we could compare the instructions.  */
1785 		  replace_label_in_insn (BB_END (bb1), label1, label2, false);
1786 
1787 		  match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
1788 			   == dir_both);
1789 		  if (dump_file && match)
1790 		    fprintf (dump_file,
1791 			     "Tablejumps in bb %i and %i match.\n",
1792 			     bb1->index, bb2->index);
1793 
1794 		  /* Set the original label in BB1->END because when deleting
1795 		     a block whose end is a tablejump, the tablejump referenced
1796 		     from the instruction is deleted too.  */
1797 		  replace_label_in_insn (BB_END (bb1), label2, label1, false);
1798 
1799 		  return match;
1800 		}
1801 	    }
1802 	  return false;
1803 	}
1804     }
1805 
1806   /* Find the last non-debug non-note instruction in each bb, except
1807      stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p
1808      handles that case specially. old_insns_match_p does not handle
1809      other types of instruction notes.  */
1810   rtx_insn *last1 = BB_END (bb1);
1811   rtx_insn *last2 = BB_END (bb2);
1812   while (!NOTE_INSN_BASIC_BLOCK_P (last1)
1813 	 && (DEBUG_INSN_P (last1) || NOTE_P (last1)))
1814     last1 = PREV_INSN (last1);
1815   while (!NOTE_INSN_BASIC_BLOCK_P (last2)
1816 	 && (DEBUG_INSN_P (last2) || NOTE_P (last2)))
1817     last2 = PREV_INSN (last2);
1818   gcc_assert (last1 && last2);
1819 
1820   /* First ensure that the instructions match.  There may be many outgoing
1821      edges so this test is generally cheaper.  */
1822   if (old_insns_match_p (mode, last1, last2) != dir_both)
1823     return false;
1824 
1825   /* Search the outgoing edges, ensure that the counts do match, find possible
1826      fallthru and exception handling edges since these need more
1827      validation.  */
1828   if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1829     return false;
1830 
1831   bool nonfakeedges = false;
1832   FOR_EACH_EDGE (e1, ei, bb1->succs)
1833     {
1834       e2 = EDGE_SUCC (bb2, ei.index);
1835 
1836       if ((e1->flags & EDGE_FAKE) == 0)
1837 	nonfakeedges = true;
1838 
1839       if (e1->flags & EDGE_EH)
1840 	nehedges1++;
1841 
1842       if (e2->flags & EDGE_EH)
1843 	nehedges2++;
1844 
1845       if (e1->flags & EDGE_FALLTHRU)
1846 	fallthru1 = e1;
1847       if (e2->flags & EDGE_FALLTHRU)
1848 	fallthru2 = e2;
1849     }
1850 
1851   /* If number of edges of various types does not match, fail.  */
1852   if (nehedges1 != nehedges2
1853       || (fallthru1 != 0) != (fallthru2 != 0))
1854     return false;
1855 
1856   /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors
1857      and the last real insn doesn't have REG_ARGS_SIZE note, don't
1858      attempt to optimize, as the two basic blocks might have different
1859      REG_ARGS_SIZE depths.  For noreturn calls and unconditional
1860      traps there should be REG_ARGS_SIZE notes, they could be missing
1861      for __builtin_unreachable () uses though.  */
1862   if (!nonfakeedges
1863       && !ACCUMULATE_OUTGOING_ARGS
1864       && (!INSN_P (last1)
1865           || !find_reg_note (last1, REG_ARGS_SIZE, NULL)))
1866     return false;
1867 
1868   /* fallthru edges must be forwarded to the same destination.  */
1869   if (fallthru1)
1870     {
1871       basic_block d1 = (forwarder_block_p (fallthru1->dest)
1872 			? single_succ (fallthru1->dest): fallthru1->dest);
1873       basic_block d2 = (forwarder_block_p (fallthru2->dest)
1874 			? single_succ (fallthru2->dest): fallthru2->dest);
1875 
1876       if (d1 != d2)
1877 	return false;
1878     }
1879 
1880   /* Ensure the same EH region.  */
1881   {
1882     rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1883     rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1884 
1885     if (!n1 && n2)
1886       return false;
1887 
1888     if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1889       return false;
1890   }
1891 
1892   /* The same checks as in try_crossjump_to_edge. It is required for RTL
1893      version of sequence abstraction.  */
1894   FOR_EACH_EDGE (e1, ei, bb2->succs)
1895     {
1896       edge e2;
1897       edge_iterator ei;
1898       basic_block d1 = e1->dest;
1899 
1900       if (FORWARDER_BLOCK_P (d1))
1901         d1 = EDGE_SUCC (d1, 0)->dest;
1902 
1903       FOR_EACH_EDGE (e2, ei, bb1->succs)
1904         {
1905           basic_block d2 = e2->dest;
1906           if (FORWARDER_BLOCK_P (d2))
1907             d2 = EDGE_SUCC (d2, 0)->dest;
1908           if (d1 == d2)
1909             break;
1910         }
1911 
1912       if (!e2)
1913         return false;
1914     }
1915 
1916   return true;
1917 }
1918 
1919 /* Return true if basic block BB has a preserve label.  */
1920 
1921 static bool
1922 block_has_preserve_label (basic_block bb)
1923 {
1924   return (bb
1925           && block_label (bb)
1926           && LABEL_PRESERVE_P (block_label (bb)));
1927 }
1928 
1929 /* E1 and E2 are edges with the same destination block.  Search their
1930    predecessors for common code.  If found, redirect control flow from
1931    (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
1932    or the other way around (dir_backward).  DIR specifies the allowed
1933    replacement direction.  */
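
/* An illustrative sketch (source level; the matching itself is done on
   RTL by flow_find_cross_jump):

	SRC1: ...; x = a + b; goto JOIN;
	SRC2: ...; x = a + b; goto JOIN;

   The common tail is split off within SRC2, the matching insns are
   removed from SRC1, and SRC1 is redirected to jump to the split-off
   block, so the shared code is emitted only once.  */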
1934 
1935 static bool
1936 try_crossjump_to_edge (int mode, edge e1, edge e2,
1937                        enum replace_direction dir)
1938 {
1939   int nmatch;
1940   basic_block src1 = e1->src, src2 = e2->src;
1941   basic_block redirect_to, redirect_from, to_remove;
1942   basic_block osrc1, osrc2, redirect_edges_to, tmp;
1943   rtx_insn *newpos1, *newpos2;
1944   edge s;
1945   edge_iterator ei;
1946 
1947   newpos1 = newpos2 = NULL;
1948 
1949   /* Search backward through forwarder blocks.  We don't need to worry
1950      about multiple entry or chained forwarders, as they will be optimized
1951      away.  We do this to look past the unconditional jump following a
1952      conditional jump that is required due to the current CFG shape.  */
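  /* E.g. (a sketch): after "if (c) goto X; goto Y;" the block holding
     "goto Y" exists only because a conditional jump cannot fall through
     to an arbitrary block; the code worth matching lives in the block
     ending with the conditional jump, so we step back to it.  */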
1953   if (single_pred_p (src1)
1954       && FORWARDER_BLOCK_P (src1))
1955     e1 = single_pred_edge (src1), src1 = e1->src;
1956 
1957   if (single_pred_p (src2)
1958       && FORWARDER_BLOCK_P (src2))
1959     e2 = single_pred_edge (src2), src2 = e2->src;
1960 
1961   /* Nothing to do if we reach ENTRY, or a common source block.  */
1962   if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun) || src2
1963       == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1964     return false;
1965   if (src1 == src2)
1966     return false;
1967 
1968   /* Seeing more than one forwarder block would confuse us later...  */
1969   if (FORWARDER_BLOCK_P (e1->dest)
1970       && FORWARDER_BLOCK_P (single_succ (e1->dest)))
1971     return false;
1972 
1973   if (FORWARDER_BLOCK_P (e2->dest)
1974       && FORWARDER_BLOCK_P (single_succ (e2->dest)))
1975     return false;
1976 
1977   /* Likewise with dead code (possibly newly created by the other optimizations
1978      of cleanup_cfg).  */
1979   if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1980     return false;
1981 
1982   /* Do not turn a crossing edge to non-crossing or vice versa after reload.  */
1983   if (BB_PARTITION (src1) != BB_PARTITION (src2)
1984       && reload_completed)
1985     return false;
1986 
1987   /* Look for the common insn sequence, part the first ...  */
1988   if (!outgoing_edges_match (mode, src1, src2))
1989     return false;
1990 
1991   /* ... and part the second.  */
1992   nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);
1993 
1994   osrc1 = src1;
1995   osrc2 = src2;
1996   if (newpos1 != NULL_RTX)
1997     src1 = BLOCK_FOR_INSN (newpos1);
1998   if (newpos2 != NULL_RTX)
1999     src2 = BLOCK_FOR_INSN (newpos2);
2000 
2001   /* Check that SRC1 and SRC2 have preds again.  They may have changed
2002      above due to the call to flow_find_cross_jump.  */
2003   if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
2004     return false;
2005 
2006   if (dir == dir_backward)
2007     {
2008       std::swap (osrc1, osrc2);
2009       std::swap (src1, src2);
2010       std::swap (e1, e2);
2011       std::swap (newpos1, newpos2);
2012     }
2013 
2014   /* Don't proceed with the crossjump unless we found a sufficient number
2015      of matching instructions or the 'from' block was totally matched
2016      (such that its predecessors will hopefully be redirected and the
2017      block removed).  */
2018   if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
2019       && (newpos1 != BB_HEAD (src1)))
2020     return false;
2021 
2022   /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
2023   if (block_has_preserve_label (e1->dest)
2024       && (e1->flags & EDGE_ABNORMAL))
2025     return false;
2026 
2027   /* Here we know that the insns at the end of SRC1 which are common with SRC2
2028      will be deleted.
2029      If we have tablejumps at the end of SRC1 and SRC2
2030      they have already been compared for equivalence in outgoing_edges_match ()
2031      so replace the references to TABLE1 by references to TABLE2.  */
2032   {
2033       rtx_insn *label1, *label2;
2034       rtx_jump_table_data *table1, *table2;
2035 
2036       if (tablejump_p (BB_END (osrc1), &label1, &table1)
2037 	  && tablejump_p (BB_END (osrc2), &label2, &table2)
2038 	  && label1 != label2)
2039 	{
2040 	  rtx_insn *insn;
2041 
2042 	  /* Replace references to LABEL1 with LABEL2.  */
2043 	  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2044 	    {
2045 	      /* Do not replace the label in SRC1->END because when deleting
2046 		 a block whose end is a tablejump, the tablejump referenced
2047 		 from the instruction is deleted too.  */
2048 	      if (insn != BB_END (osrc1))
2049 		replace_label_in_insn (insn, label1, label2, true);
2050 	    }
2051 	}
2052   }
2053 
2054   /* Avoid splitting if possible.  We must always split when SRC2 has
2055      EH predecessor edges, or we may end up with basic blocks with both
2056      normal and EH predecessor edges.  */
2057   if (newpos2 == BB_HEAD (src2)
2058       && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
2059     redirect_to = src2;
2060   else
2061     {
2062       if (newpos2 == BB_HEAD (src2))
2063 	{
2064 	  /* Skip possible basic block header.  */
2065 	  if (LABEL_P (newpos2))
2066 	    newpos2 = NEXT_INSN (newpos2);
2067 	  while (DEBUG_INSN_P (newpos2))
2068 	    newpos2 = NEXT_INSN (newpos2);
2069 	  if (NOTE_P (newpos2))
2070 	    newpos2 = NEXT_INSN (newpos2);
2071 	  while (DEBUG_INSN_P (newpos2))
2072 	    newpos2 = NEXT_INSN (newpos2);
2073 	}
2074 
2075       if (dump_file)
2076 	fprintf (dump_file, "Splitting bb %i before %i insns\n",
2077 		 src2->index, nmatch);
2078       redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
2079     }
2080 
2081   if (dump_file)
2082     fprintf (dump_file,
2083 	     "Cross jumping from bb %i to bb %i; %i common insns\n",
2084 	     src1->index, src2->index, nmatch);
2085 
2086   /* We may have some registers visible through the block.  */
2087   df_set_bb_dirty (redirect_to);
2088 
2089   if (osrc2 == src2)
2090     redirect_edges_to = redirect_to;
2091   else
2092     redirect_edges_to = osrc2;
2093 
2094   /* Recompute the counts of destinations of outgoing edges.  */
2095   FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
2096     {
2097       edge s2;
2098       edge_iterator ei;
2099       basic_block d = s->dest;
2100 
2101       if (FORWARDER_BLOCK_P (d))
2102 	d = single_succ (d);
2103 
2104       FOR_EACH_EDGE (s2, ei, src1->succs)
2105 	{
2106 	  basic_block d2 = s2->dest;
2107 	  if (FORWARDER_BLOCK_P (d2))
2108 	    d2 = single_succ (d2);
2109 	  if (d == d2)
2110 	    break;
2111 	}
2112 
2113       /* Take care to update possible forwarder blocks.  We verified
2114 	 that there is no more than one in the chain, so we can't run
2115 	 into an infinite loop.  */
2116       if (FORWARDER_BLOCK_P (s->dest))
2117 	s->dest->count += s->count ();
2118 
2119       if (FORWARDER_BLOCK_P (s2->dest))
2120 	s2->dest->count -= s->count ();
2121 
2122       s->probability = s->probability.combine_with_count
2123 			  (redirect_edges_to->count,
2124 			   s2->probability, src1->count);
2125     }
2126 
2127   /* Adjust count for the block.  An earlier jump
2128      threading pass may have left the profile in an inconsistent
2129      state (see update_bb_profile_for_threading) so we must be
2130      prepared for overflows.  */
2131   tmp = redirect_to;
2132   do
2133     {
2134       tmp->count += src1->count;
2135       if (tmp == redirect_edges_to)
2136         break;
2137       tmp = find_fallthru_edge (tmp->succs)->dest;
2138     }
2139   while (true);
2140   update_br_prob_note (redirect_edges_to);
2141 
2142   /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */
2143 
2144   /* Skip possible basic block header.  */
2145   if (LABEL_P (newpos1))
2146     newpos1 = NEXT_INSN (newpos1);
2147 
2148   while (DEBUG_INSN_P (newpos1))
2149     newpos1 = NEXT_INSN (newpos1);
2150 
2151   if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
2152     newpos1 = NEXT_INSN (newpos1);
2153 
2154   while (DEBUG_INSN_P (newpos1))
2155     newpos1 = NEXT_INSN (newpos1);
2156 
2157   redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
2158   to_remove = single_succ (redirect_from);
2159 
2160   redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
2161   delete_basic_block (to_remove);
2162 
2163   update_forwarder_flag (redirect_from);
2164   if (redirect_to != src2)
2165     update_forwarder_flag (src2);
2166 
2167   return true;
2168 }
2169 
2170 /* Search the predecessors of BB for common insn sequences.  When found,
2171    share code between them by redirecting control flow.  Return true if
2172    any changes were made.  */
2173 
2174 static bool
2175 try_crossjump_bb (int mode, basic_block bb)
2176 {
2177   edge e, e2, fallthru;
2178   bool changed;
2179   unsigned max, ix, ix2;
2180 
2181   /* Nothing to do if there are not at least two incoming edges.  */
2182   if (EDGE_COUNT (bb->preds) < 2)
2183     return false;
2184 
2185   /* Don't crossjump if this block ends in a computed jump,
2186      unless we are optimizing for size.  */
2187   if (optimize_bb_for_size_p (bb)
2188       && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
2189       && computed_jump_p (BB_END (bb)))
2190     return false;
2191 
2192   /* If we are partitioning hot/cold basic blocks, we don't want to
2193      mess up unconditional or indirect jumps that cross between hot
2194      and cold sections.
2195 
2196      Basic block partitioning may result in some jumps that appear to
2197      be optimizable (or blocks that appear to be mergeable), but which really
2198      must be left untouched (they are required to make it safely across
2199      partition boundaries).  See the comments at the top of
2200      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
2201 
2202   if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
2203 					BB_PARTITION (EDGE_PRED (bb, 1)->src)
2204       || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
2205     return false;
2206 
2207   /* It is always cheapest to redirect a block that ends in a branch to
2208      a block that falls through into BB, as that adds no branches to the
2209      program.  We'll try that combination first.  */
2210   fallthru = NULL;
2211   max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
2212 
2213   if (EDGE_COUNT (bb->preds) > max)
2214     return false;
2215 
2216   fallthru = find_fallthru_edge (bb->preds);
2217 
2218   changed = false;
2219   for (ix = 0; ix < EDGE_COUNT (bb->preds);)
2220     {
2221       e = EDGE_PRED (bb, ix);
2222       ix++;
2223 
2224       /* As noted above, first try with the fallthru predecessor (or, a
2225 	 fallthru predecessor if we are in cfglayout mode).  */
2226       if (fallthru)
2227 	{
2228 	  /* Don't combine the fallthru edge into anything else.
2229 	     If there is a match, we'll do it the other way around.  */
2230 	  if (e == fallthru)
2231 	    continue;
2232 	  /* If nothing changed since the last attempt, there is nothing
2233 	     we can do.  */
2234 	  if (!first_pass
2235 	      && !((e->src->flags & BB_MODIFIED)
2236 		   || (fallthru->src->flags & BB_MODIFIED)))
2237 	    continue;
2238 
2239 	  if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))
2240 	    {
2241 	      changed = true;
2242 	      ix = 0;
2243 	      continue;
2244 	    }
2245 	}
2246 
2247       /* Non-obvious work limiting check: Recognize that we're going
2248 	 to call try_crossjump_bb on every basic block.  So if we have
2249 	 two blocks with lots of outgoing edges (a switch) and they
2250 	 share lots of common destinations, then we would do the
2251 	 cross-jump check once for each common destination.
2252 
2253 	 Now, if the blocks actually are cross-jump candidates, then
2254 	 all of their destinations will be shared.  Which means that
2255 	 we only need check them for cross-jump candidacy once.  We
2256 	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
2257 	 choosing to do the check from the block for which the edge
2258 	 in question is the first successor of A.  */
2259       if (EDGE_SUCC (e->src, 0) != e)
2260 	continue;
2261 
2262       for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
2263 	{
2264 	  e2 = EDGE_PRED (bb, ix2);
2265 
2266 	  if (e2 == e)
2267 	    continue;
2268 
2269 	  /* We've already checked the fallthru edge above.  */
2270 	  if (e2 == fallthru)
2271 	    continue;
2272 
2273 	  /* The "first successor" check above only prevents multiple
2274 	     checks of crossjump(A,B).  In order to prevent redundant
2275 	     checks of crossjump(B,A), require that A be the block
2276 	     with the lowest index.  */
2277 	  if (e->src->index > e2->src->index)
2278 	    continue;
2279 
2280 	  /* If nothing changed since the last attempt, there is nothing
2281 	     we can do.  */
2282 	  if (!first_pass
2283 	      && !((e->src->flags & BB_MODIFIED)
2284 		   || (e2->src->flags & BB_MODIFIED)))
2285 	    continue;
2286 
2287 	  /* Neither e nor e2 is a fallthru edge, so we can crossjump in either
2288 	     direction.  */
2289 	  if (try_crossjump_to_edge (mode, e, e2, dir_both))
2290 	    {
2291 	      changed = true;
2292 	      ix = 0;
2293 	      break;
2294 	    }
2295 	}
2296     }
2297 
2298   if (changed)
2299     crossjumps_occurred = true;
2300 
2301   return changed;
2302 }
2303 
2304 /* Search the successors of BB for common insn sequences.  When found,
2305    share code between them by moving it across the basic block
2306    boundary.  Return true if any changes were made.  */
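
/* An illustrative sketch (source level; liveness permitting):

	if (c) { t = x + y; f (t); }
	else   { t = x + y; g (t); }

   becomes

	t = x + y;
	if (c) f (t); else g (t);

   The identical leading insns of the successor blocks are moved up in
   front of the branch and deleted from the other successors.  */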
2307 
2308 static bool
2309 try_head_merge_bb (basic_block bb)
2310 {
2311   basic_block final_dest_bb = NULL;
2312   int max_match = INT_MAX;
2313   edge e0;
2314   rtx_insn **headptr, **currptr, **nextptr;
2315   bool changed, moveall;
2316   unsigned ix;
2317   rtx_insn *e0_last_head;
2318   rtx cond;
2319   rtx_insn *move_before;
2320   unsigned nedges = EDGE_COUNT (bb->succs);
2321   rtx_insn *jump = BB_END (bb);
2322   regset live, live_union;
2323 
2324   /* Nothing to do if there are not at least two outgoing edges.  */
2325   if (nedges < 2)
2326     return false;
2327 
2328   /* Don't crossjump if this block ends in a computed jump,
2329      unless we are optimizing for size.  */
2330   if (optimize_bb_for_size_p (bb)
2331       && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
2332       && computed_jump_p (BB_END (bb)))
2333     return false;
2334 
2335   cond = get_condition (jump, &move_before, true, false);
2336   if (cond == NULL_RTX)
2337     {
2338       if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2339 	move_before = prev_nonnote_nondebug_insn (jump);
2340       else
2341 	move_before = jump;
2342     }
2343 
2344   for (ix = 0; ix < nedges; ix++)
2345     if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
2346       return false;
2347 
2348   for (ix = 0; ix < nedges; ix++)
2349     {
2350       edge e = EDGE_SUCC (bb, ix);
2351       basic_block other_bb = e->dest;
2352 
2353       if (df_get_bb_dirty (other_bb))
2354 	{
2355 	  block_was_dirty = true;
2356 	  return false;
2357 	}
2358 
2359       if (e->flags & EDGE_ABNORMAL)
2360 	return false;
2361 
2362       /* Normally, all destination blocks must only be reachable from this
2363 	 block, i.e. they must have one incoming edge.
2364 
2365 	 There is one special case we can handle, that of multiple consecutive
2366 	 jumps where the first jumps to one of the targets of the second jump.
2367 	 This happens frequently in switch statements for default labels.
2368 	 The structure is as follows:
2369 	 FINAL_DEST_BB
2370 	 ....
2371 	 if (cond) jump A;
2372 	 fall through
2373 	 BB
2374 	 jump with targets A, B, C, D...
2375 	 A
2376 	 has two incoming edges, from FINAL_DEST_BB and BB
2377 
2378 	 In this case, we can try to move the insns through BB and into
2379 	 FINAL_DEST_BB.  */
2380       if (EDGE_COUNT (other_bb->preds) != 1)
2381 	{
2382 	  edge incoming_edge, incoming_bb_other_edge;
2383 	  edge_iterator ei;
2384 
2385 	  if (final_dest_bb != NULL
2386 	      || EDGE_COUNT (other_bb->preds) != 2)
2387 	    return false;
2388 
2389 	  /* We must be able to move the insns across the whole block.  */
2390 	  move_before = BB_HEAD (bb);
2391 	  while (!NONDEBUG_INSN_P (move_before))
2392 	    move_before = NEXT_INSN (move_before);
2393 
2394 	  if (EDGE_COUNT (bb->preds) != 1)
2395 	    return false;
2396 	  incoming_edge = EDGE_PRED (bb, 0);
2397 	  final_dest_bb = incoming_edge->src;
2398 	  if (EDGE_COUNT (final_dest_bb->succs) != 2)
2399 	    return false;
2400 	  FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
2401 	    if (incoming_bb_other_edge != incoming_edge)
2402 	      break;
2403 	  if (incoming_bb_other_edge->dest != other_bb)
2404 	    return false;
2405 	}
2406     }
2407 
2408   e0 = EDGE_SUCC (bb, 0);
2409   e0_last_head = NULL;
2410   changed = false;
2411 
2412   for (ix = 1; ix < nedges; ix++)
2413     {
2414       edge e = EDGE_SUCC (bb, ix);
2415       rtx_insn *e0_last, *e_last;
2416       int nmatch;
2417 
2418       nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
2419 						 &e0_last, &e_last, 0);
2420       if (nmatch == 0)
2421 	return false;
2422 
2423       if (nmatch < max_match)
2424 	{
2425 	  max_match = nmatch;
2426 	  e0_last_head = e0_last;
2427 	}
2428     }
2429 
2430   /* If we matched an entire block, we probably have to avoid moving the
2431      last insn.  */
2432   if (max_match > 0
2433       && e0_last_head == BB_END (e0->dest)
2434       && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
2435 	  || control_flow_insn_p (e0_last_head)))
2436     {
2437       max_match--;
2438       if (max_match == 0)
2439 	return false;
2440       e0_last_head = prev_real_nondebug_insn (e0_last_head);
2441     }
2442 
2443   if (max_match == 0)
2444     return false;
2445 
2446   /* We must find a union of the live registers at each of the end points.  */
2447   live = BITMAP_ALLOC (NULL);
2448   live_union = BITMAP_ALLOC (NULL);
2449 
2450   currptr = XNEWVEC (rtx_insn *, nedges);
2451   headptr = XNEWVEC (rtx_insn *, nedges);
2452   nextptr = XNEWVEC (rtx_insn *, nedges);
2453 
2454   for (ix = 0; ix < nedges; ix++)
2455     {
2456       int j;
2457       basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
2458       rtx_insn *head = BB_HEAD (merge_bb);
2459 
2460       while (!NONDEBUG_INSN_P (head))
2461 	head = NEXT_INSN (head);
2462       headptr[ix] = head;
2463       currptr[ix] = head;
2464 
2465       /* Compute the end point and live information.  */
2466       for (j = 1; j < max_match; j++)
2467 	do
2468 	  head = NEXT_INSN (head);
2469 	while (!NONDEBUG_INSN_P (head));
2470       simulate_backwards_to_point (merge_bb, live, head);
2471       IOR_REG_SET (live_union, live);
2472     }
2473 
2474   /* If we're moving across two blocks, verify the validity of the
2475      first move, then adjust the target and let the loop below deal
2476      with the final move.  */
2477   if (final_dest_bb != NULL)
2478     {
2479       rtx_insn *move_upto;
2480 
2481       moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
2482 				       jump, e0->dest, live_union,
2483 				       NULL, &move_upto);
2484       if (!moveall)
2485 	{
2486 	  if (move_upto == NULL_RTX)
2487 	    goto out;
2488 
2489 	  while (e0_last_head != move_upto)
2490 	    {
2491 	      df_simulate_one_insn_backwards (e0->dest, e0_last_head,
2492 					      live_union);
2493 	      e0_last_head = PREV_INSN (e0_last_head);
2494 	    }
2495 	}
2496       if (e0_last_head == NULL_RTX)
2497 	goto out;
2498 
2499       jump = BB_END (final_dest_bb);
2500       cond = get_condition (jump, &move_before, true, false);
2501       if (cond == NULL_RTX)
2502 	{
2503 	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2504 	    move_before = prev_nonnote_nondebug_insn (jump);
2505 	  else
2506 	    move_before = jump;
2507 	}
2508     }
2509 
2510   do
2511     {
2512       rtx_insn *move_upto;
2513       moveall = can_move_insns_across (currptr[0], e0_last_head,
2514 				       move_before, jump, e0->dest, live_union,
2515 				       NULL, &move_upto);
2516       if (!moveall && move_upto == NULL_RTX)
2517 	{
2518 	  if (jump == move_before)
2519 	    break;
2520 
2521 	  /* Try again, using a different insertion point.  */
2522 	  move_before = jump;
2523 
2524 	  /* Don't try moving before a cc0 user, as that may invalidate
2525 	     the cc0.  */
2526 	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2527 	    break;
2528 
2529 	  continue;
2530 	}
2531 
2532       if (final_dest_bb && !moveall)
2533 	/* We haven't checked whether a partial move would be OK for the first
2534 	   move, so we have to fail this case.  */
2535 	break;
2536 
2537       changed = true;
2538       for (;;)
2539 	{
2540 	  if (currptr[0] == move_upto)
2541 	    break;
2542 	  for (ix = 0; ix < nedges; ix++)
2543 	    {
2544 	      rtx_insn *curr = currptr[ix];
2545 	      do
2546 		curr = NEXT_INSN (curr);
2547 	      while (!NONDEBUG_INSN_P (curr));
2548 	      currptr[ix] = curr;
2549 	    }
2550 	}
2551 
2552       /* If we can't currently move all of the identical insns, remember
2553 	 each insn after the range that we'll merge.  */
2554       if (!moveall)
2555 	for (ix = 0; ix < nedges; ix++)
2556 	  {
2557 	    rtx_insn *curr = currptr[ix];
2558 	    do
2559 	      curr = NEXT_INSN (curr);
2560 	    while (!NONDEBUG_INSN_P (curr));
2561 	    nextptr[ix] = curr;
2562 	  }
2563 
2564       reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
2565       df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
2566       if (final_dest_bb != NULL)
2567 	df_set_bb_dirty (final_dest_bb);
2568       df_set_bb_dirty (bb);
2569       for (ix = 1; ix < nedges; ix++)
2570 	{
2571 	  df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
2572 	  delete_insn_chain (headptr[ix], currptr[ix], false);
2573 	}
2574       if (!moveall)
2575 	{
2576 	  if (jump == move_before)
2577 	    break;
2578 
2579 	  /* For the unmerged insns, try a different insertion point.  */
2580 	  move_before = jump;
2581 
2582 	  /* Don't try moving before a cc0 user, as that may invalidate
2583 	     the cc0.  */
2584 	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2585 	    break;
2586 
2587 	  for (ix = 0; ix < nedges; ix++)
2588 	    currptr[ix] = headptr[ix] = nextptr[ix];
2589 	}
2590     }
2591   while (!moveall);
2592 
2593  out:
2594   free (currptr);
2595   free (headptr);
2596   free (nextptr);
2597 
2598   crossjumps_occurred |= changed;
2599 
2600   return changed;
2601 }
2602 
2603 /* Return true if BB contains just a bb note, or a bb note followed
2604    only by DEBUG_INSNs.  */
2605 
2606 static bool
2607 trivially_empty_bb_p (basic_block bb)
2608 {
2609   rtx_insn *insn = BB_END (bb);
2610 
2611   while (1)
2612     {
2613       if (insn == BB_HEAD (bb))
2614 	return true;
2615       if (!DEBUG_INSN_P (insn))
2616 	return false;
2617       insn = PREV_INSN (insn);
2618     }
2619 }
2620 
2621 /* Return true if BB contains just a return and possibly a USE of the
2622    return value.  Fill in *RET and *USE with the return and use insns
2623    if any found, otherwise NULL.  All CLOBBERs are ignored.  */
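
/* For instance (a sketch using a hypothetical function-value register),
   a block containing only

	(use (reg:SI ax))
	(simple_return)

   qualifies, with *USE and *RET set to those insns; any additional
   non-CLOBBER insn disqualifies the block.  */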
2624 
2625 static bool
2626 bb_is_just_return (basic_block bb, rtx_insn **ret, rtx_insn **use)
2627 {
2628   *ret = *use = NULL;
2629   rtx_insn *insn;
2630 
2631   if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
2632     return false;
2633 
2634   FOR_BB_INSNS (bb, insn)
2635     if (NONDEBUG_INSN_P (insn))
2636       {
2637 	rtx pat = PATTERN (insn);
2638 
2639 	if (!*ret && ANY_RETURN_P (pat))
2640 	  *ret = insn;
2641 	else if (!*ret && !*use && GET_CODE (pat) == USE
2642 	    && REG_P (XEXP (pat, 0))
2643 	    && REG_FUNCTION_VALUE_P (XEXP (pat, 0)))
2644 	  *use = insn;
2645 	else if (GET_CODE (pat) != CLOBBER)
2646 	  return false;
2647       }
2648 
2649   return !!*ret;
2650 }
2651 
2652 /* Do simple CFG optimizations - basic block merging, simplifying of jump
2653    instructions etc.  Return nonzero if changes were made.  */
2654 
2655 static bool
2656 try_optimize_cfg (int mode)
2657 {
2658   bool changed_overall = false;
2659   bool changed;
2660   int iterations = 0;
2661   basic_block bb, b, next;
2662 
2663   if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
2664     clear_bb_flags ();
2665 
2666   crossjumps_occurred = false;
2667 
2668   FOR_EACH_BB_FN (bb, cfun)
2669     update_forwarder_flag (bb);
2670 
2671   if (! targetm.cannot_modify_jumps_p ())
2672     {
2673       first_pass = true;
2674       /* Attempt to merge blocks as made possible by edge removal.  If
2675 	 a block has only one successor, and the successor has only
2676 	 one predecessor, they may be combined.  */
2677       do
2678 	{
2679 	  block_was_dirty = false;
2680 	  changed = false;
2681 	  iterations++;
2682 
2683 	  if (dump_file)
2684 	    fprintf (dump_file,
2685 		     "\n\ntry_optimize_cfg iteration %i\n\n",
2686 		     iterations);
2687 
2688 	  for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
2689 	       != EXIT_BLOCK_PTR_FOR_FN (cfun);)
2690 	    {
2691 	      basic_block c;
2692 	      edge s;
2693 	      bool changed_here = false;
2694 
2695 	      /* Delete trivially dead basic blocks.  This is either
2696 		 blocks with no predecessors, or empty blocks with no
2697 		 successors.  However if the empty block with no
2698 		 successors is the successor of the ENTRY_BLOCK, it is
2699 		 kept.  This ensures that the ENTRY_BLOCK will have a
2700 		 successor which is a precondition for many RTL
2701 		 passes.  Empty blocks may result from expanding
2702 		 __builtin_unreachable ().  */
2703 	      if (EDGE_COUNT (b->preds) == 0
2704 		  || (EDGE_COUNT (b->succs) == 0
2705 		      && trivially_empty_bb_p (b)
2706 		      && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest
2707 		      != b))
2708 		{
2709 		  c = b->prev_bb;
2710 		  if (EDGE_COUNT (b->preds) > 0)
2711 		    {
2712 		      edge e;
2713 		      edge_iterator ei;
2714 
2715 		      if (current_ir_type () == IR_RTL_CFGLAYOUT)
2716 			{
2717 			  rtx_insn *insn;
2718 			  for (insn = BB_FOOTER (b);
2719 			       insn; insn = NEXT_INSN (insn))
2720 			    if (BARRIER_P (insn))
2721 			      break;
2722 			  if (insn)
2723 			    FOR_EACH_EDGE (e, ei, b->preds)
2724 			      if ((e->flags & EDGE_FALLTHRU))
2725 				{
2726 				  if (BB_FOOTER (b)
2727 				      && BB_FOOTER (e->src) == NULL)
2728 				    {
2729 				      BB_FOOTER (e->src) = BB_FOOTER (b);
2730 				      BB_FOOTER (b) = NULL;
2731 				    }
2732 				  else
2733 				    emit_barrier_after_bb (e->src);
2734 				}
2735 			}
2736 		      else
2737 			{
2738 			  rtx_insn *last = get_last_bb_insn (b);
2739 			  if (last && BARRIER_P (last))
2740 			    FOR_EACH_EDGE (e, ei, b->preds)
2741 			      if ((e->flags & EDGE_FALLTHRU))
2742 				emit_barrier_after (BB_END (e->src));
2743 			}
2744 		    }
2745 		  delete_basic_block (b);
2746 		  changed = true;
2747 		  /* Avoid trying to remove the exit block.  */
2748 		  b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c);
2749 		  continue;
2750 		}
2751 
2752 	      /* Remove code labels no longer used.  */
2753 	      if (single_pred_p (b)
2754 		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2755 		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
2756 		  && LABEL_P (BB_HEAD (b))
2757 		  && !LABEL_PRESERVE_P (BB_HEAD (b))
2758 		  /* If the previous block ends with a branch to this
2759 		     block, we can't delete the label.  Normally this
2760 		     is a condjump that is yet to be simplified, but
2761 		     if CASE_DROPS_THRU, this can be a tablejump with
2762 		     some element going to the same place as the
2763 		     default (fallthru).  */
2764 		  && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun)
2765 		      || !JUMP_P (BB_END (single_pred (b)))
2766 		      || ! label_is_jump_target_p (BB_HEAD (b),
2767 						   BB_END (single_pred (b)))))
2768 		{
2769 		  delete_insn (BB_HEAD (b));
2770 		  if (dump_file)
2771 		    fprintf (dump_file, "Deleted label in block %i.\n",
2772 			     b->index);
2773 		}
2774 
2775 	      /* If we fall through an empty block, we can remove it.  */
2776 	      if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL))
2777 		  && single_pred_p (b)
2778 		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2779 		  && !LABEL_P (BB_HEAD (b))
2780 		  && FORWARDER_BLOCK_P (b)
2781 		  /* Note that forwarder_block_p true ensures that
2782 		     there is a successor for this block.  */
2783 		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
2784 		  && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
2785 		{
2786 		  if (dump_file)
2787 		    fprintf (dump_file,
2788 			     "Deleting fallthru block %i.\n",
2789 			     b->index);
2790 
2791 		  c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2792 		       ? b->next_bb : b->prev_bb);
2793 		  redirect_edge_succ_nodup (single_pred_edge (b),
2794 					    single_succ (b));
2795 		  delete_basic_block (b);
2796 		  changed = true;
2797 		  b = c;
2798 		  continue;
2799 		}
2800 
2801 	      /* Merge B with its single successor, if any.  */
2802 	      if (single_succ_p (b)
2803 		  && (s = single_succ_edge (b))
2804 		  && !(s->flags & EDGE_COMPLEX)
2805 		  && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun)
2806 		  && single_pred_p (c)
2807 		  && b != c)
2808 		{
2809 		  /* When not in cfg_layout mode use code aware of reordering
2810 		     INSN.  This code possibly creates new basic blocks so it
2811 		     does not fit the merge_blocks interface and is kept here in
2812 		     the hope that it will become useless once more of the
2813 		     compiler is transformed to use cfg_layout mode.  */
2814 
2815 		  if ((mode & CLEANUP_CFGLAYOUT)
2816 		      && can_merge_blocks_p (b, c))
2817 		    {
2818 		      merge_blocks (b, c);
2819 		      update_forwarder_flag (b);
2820 		      changed_here = true;
2821 		    }
2822 		  else if (!(mode & CLEANUP_CFGLAYOUT)
2823 			   /* If the jump insn has side effects,
2824 			      we can't kill the edge.  */
2825 			   && (!JUMP_P (BB_END (b))
2826 			       || (reload_completed
2827 				   ? simplejump_p (BB_END (b))
2828 				   : (onlyjump_p (BB_END (b))
2829 				      && !tablejump_p (BB_END (b),
2830 						       NULL, NULL))))
2831 			   && (next = merge_blocks_move (s, b, c, mode)))
2832 		      {
2833 			b = next;
2834 			changed_here = true;
2835 		      }
2836 		}
2837 
2838 	      /* Try to change a branch to a return to just that return.  */
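	      /* E.g. (sketch): "goto L; ... L: return;" becomes a direct
		 return in B, with B's successor edge redirected to the
		 exit block.  */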
2839 	      rtx_insn *ret, *use;
2840 	      if (single_succ_p (b)
2841 		  && onlyjump_p (BB_END (b))
2842 		  && bb_is_just_return (single_succ (b), &ret, &use))
2843 		{
2844 		  if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2845 				     PATTERN (ret), 0))
2846 		    {
2847 		      if (use)
2848 			emit_insn_before (copy_insn (PATTERN (use)),
2849 					  BB_END (b));
2850 		      if (dump_file)
2851 			fprintf (dump_file, "Changed jump %d->%d to return.\n",
2852 				 b->index, single_succ (b)->index);
2853 		      redirect_edge_succ (single_succ_edge (b),
2854 					  EXIT_BLOCK_PTR_FOR_FN (cfun));
2855 		      single_succ_edge (b)->flags &= ~EDGE_CROSSING;
2856 		      changed_here = true;
2857 		    }
2858 		}
2859 
2860 	      /* Try to change a conditional branch to a return to the
2861 		 respective conditional return.  */
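	      /* E.g. (sketch): "if (c) goto L; ... L: return;" becomes
		 "if (c) return;", with the branch edge redirected to the
		 exit block.  */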
2862 	      if (EDGE_COUNT (b->succs) == 2
2863 		  && any_condjump_p (BB_END (b))
2864 		  && bb_is_just_return (BRANCH_EDGE (b)->dest, &ret, &use))
2865 		{
2866 		  if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2867 				     PATTERN (ret), 0))
2868 		    {
2869 		      if (use)
2870 			emit_insn_before (copy_insn (PATTERN (use)),
2871 					  BB_END (b));
2872 		      if (dump_file)
2873 			fprintf (dump_file, "Changed conditional jump %d->%d "
2874 				 "to conditional return.\n",
2875 				 b->index, BRANCH_EDGE (b)->dest->index);
2876 		      redirect_edge_succ (BRANCH_EDGE (b),
2877 					  EXIT_BLOCK_PTR_FOR_FN (cfun));
2878 		      BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
2879 		      changed_here = true;
2880 		    }
2881 		}
2882 
2883 	      /* Try to flip a conditional branch that falls through to
2884 		 a return so that it becomes a conditional return and a
2885 		 new jump to the original branch target.  */
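	      /* E.g. (sketch): "if (c) goto L1; return;" becomes
		 "if (!c) return; goto L1;", i.e. a conditional return plus
		 an unconditional jump to the original branch target.  */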
2886 	      if (EDGE_COUNT (b->succs) == 2
2887 		  && BRANCH_EDGE (b)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
2888 		  && any_condjump_p (BB_END (b))
2889 		  && bb_is_just_return (FALLTHRU_EDGE (b)->dest, &ret, &use))
2890 		{
2891 		  if (invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2892 				   JUMP_LABEL (BB_END (b)), 0))
2893 		    {
2894 		      basic_block new_ft = BRANCH_EDGE (b)->dest;
2895 		      if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2896 					 PATTERN (ret), 0))
2897 			{
2898 			  if (use)
2899 			    emit_insn_before (copy_insn (PATTERN (use)),
2900 					      BB_END (b));
2901 			  if (dump_file)
2902 			    fprintf (dump_file, "Changed conditional jump "
2903 				     "%d->%d to conditional return, adding "
2904 				     "fall-through jump.\n",
2905 				     b->index, BRANCH_EDGE (b)->dest->index);
2906 			  redirect_edge_succ (BRANCH_EDGE (b),
2907 					      EXIT_BLOCK_PTR_FOR_FN (cfun));
2908 			  BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
2909 			  std::swap (BRANCH_EDGE (b)->probability,
2910 				     FALLTHRU_EDGE (b)->probability);
2911 			  update_br_prob_note (b);
2912 			  basic_block jb = force_nonfallthru (FALLTHRU_EDGE (b));
2913 			  notice_new_block (jb);
2914 			  if (!redirect_jump (as_a <rtx_jump_insn *> (BB_END (jb)),
2915 					      block_label (new_ft), 0))
2916 			    gcc_unreachable ();
2917 			  redirect_edge_succ (single_succ_edge (jb), new_ft);
2918 			  changed_here = true;
2919 			}
2920 		      else
2921 			{
2922 			  /* Invert the jump back to what it was.  This should
2923 			     never fail.  */
2924 			  if (!invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2925 					    JUMP_LABEL (BB_END (b)), 0))
2926 			    gcc_unreachable ();
2927 			}
2928 		    }
2929 		}
2930 
2931 	      /* Simplify branch over branch.  */
2932 	      if ((mode & CLEANUP_EXPENSIVE)
2933 		   && !(mode & CLEANUP_CFGLAYOUT)
2934 		   && try_simplify_condjump (b))
2935 		changed_here = true;
2936 
2937 	      /* If B has a single outgoing edge, but uses a
2938 		 non-trivial jump instruction without side-effects, we
2939 		 can either delete the jump entirely, or replace it
2940 		 with a simple unconditional jump.  */
2941 	      if (single_succ_p (b)
2942 		  && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
2943 		  && onlyjump_p (BB_END (b))
2944 		  && !CROSSING_JUMP_P (BB_END (b))
2945 		  && try_redirect_by_replacing_jump (single_succ_edge (b),
2946 						     single_succ (b),
2947 						     (mode & CLEANUP_CFGLAYOUT) != 0))
2948 		{
2949 		  update_forwarder_flag (b);
2950 		  changed_here = true;
2951 		}
2952 
2953 	      /* Simplify branch to branch.  */
2954 	      if (try_forward_edges (mode, b))
2955 		{
2956 		  update_forwarder_flag (b);
2957 		  changed_here = true;
2958 		}
2959 
2960 	      /* Look for shared code between blocks.  */
2961 	      if ((mode & CLEANUP_CROSSJUMP)
2962 		  && try_crossjump_bb (mode, b))
2963 		changed_here = true;
2964 
2965 	      if ((mode & CLEANUP_CROSSJUMP)
2966 		  /* This can lengthen register lifetimes.  Do it only after
2967 		     reload.  */
2968 		  && reload_completed
2969 		  && try_head_merge_bb (b))
2970 		changed_here = true;
2971 
2972 	      /* Don't get confused by the index shift caused by
2973 		 deleting blocks.  */
2974 	      if (!changed_here)
2975 		b = b->next_bb;
2976 	      else
2977 		changed = true;
2978 	    }
2979 
2980 	  if ((mode & CLEANUP_CROSSJUMP)
2981 	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
2982 	    changed = true;
2983 
2984 	  if (block_was_dirty)
2985 	    {
2986 	      /* This should only be set by head-merging.  */
2987 	      gcc_assert (mode & CLEANUP_CROSSJUMP);
2988 	      df_analyze ();
2989 	    }
2990 
2991 	  if (changed)
2992             {
2993               /* Edge forwarding in particular can cause hot blocks previously
2994                  reached by both hot and cold blocks to become dominated only
2995                  by cold blocks. This will cause the verification below to fail,
2996                  and lead to now cold code in the hot section. This is not easy
2997                  to detect and fix during edge forwarding, and in some cases
2998                  is only visible after newly unreachable blocks are deleted,
2999                  which will be done in fixup_partitions.  */
3000 	      if ((mode & CLEANUP_NO_PARTITIONING) == 0)
3001 		{
3002 		  fixup_partitions ();
3003 	          checking_verify_flow_info ();
3004 		}
3005             }
3006 
3007 	  changed_overall |= changed;
3008 	  first_pass = false;
3009 	}
3010       while (changed);
3011     }
3012 
3013   FOR_ALL_BB_FN (b, cfun)
3014     b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
3015 
3016   return changed_overall;
3017 }
3018 
3019 /* Delete all unreachable basic blocks.  */
3020 
3021 bool
3022 delete_unreachable_blocks (void)
3023 {
3024   bool changed = false;
3025   basic_block b, prev_bb;
3026 
3027   find_unreachable_blocks ();
3028 
3029   /* When we're in GIMPLE mode and there may be debug bind insns, we
3030      should delete blocks in reverse dominator order, so as to get a
3031      chance to substitute all released DEFs into debug bind stmts.  If
3032      we don't have dominators information, walking blocks backward
3033      gets us a better chance of retaining most debug information than
3034      otherwise.  */
3035   if (MAY_HAVE_DEBUG_BIND_INSNS && current_ir_type () == IR_GIMPLE
3036       && dom_info_available_p (CDI_DOMINATORS))
3037     {
3038       for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
3039 	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
3040 	{
3041 	  prev_bb = b->prev_bb;
3042 
3043 	  if (!(b->flags & BB_REACHABLE))
3044 	    {
3045 	      /* Speed up the removal of blocks that don't dominate
3046 		 others.  Walking backwards, this should be the common
3047 		 case.  */
3048 	      if (!first_dom_son (CDI_DOMINATORS, b))
3049 		delete_basic_block (b);
3050 	      else
3051 		{
3052 		  vec<basic_block> h
3053 		    = get_all_dominated_blocks (CDI_DOMINATORS, b);
3054 
3055 		  while (h.length ())
3056 		    {
3057 		      b = h.pop ();
3058 
3059 		      prev_bb = b->prev_bb;
3060 
3061 		      gcc_assert (!(b->flags & BB_REACHABLE));
3062 
3063 		      delete_basic_block (b);
3064 		    }
3065 
3066 		  h.release ();
3067 		}
3068 
3069 	      changed = true;
3070 	    }
3071 	}
3072     }
3073   else
3074     {
3075       for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
3076 	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
3077 	{
3078 	  prev_bb = b->prev_bb;
3079 
3080 	  if (!(b->flags & BB_REACHABLE))
3081 	    {
3082 	      delete_basic_block (b);
3083 	      changed = true;
3084 	    }
3085 	}
3086     }
3087 
3088   if (changed)
3089     tidy_fallthru_edges ();
3090   return changed;
3091 }
3092 
3093 /* Delete any jump tables never referenced.  We can't delete them at the
3094    time of removing the tablejump insn, as they are referenced by the
3095    preceding insns computing the destination, so we delay deleting them
3096    and garbage-collect them once life information is computed.  */
3097 void
3098 delete_dead_jumptables (void)
3099 {
3100   basic_block bb;
3101 
3102   /* A dead jump table does not belong to any basic block.  Scan insns
3103      between two adjacent basic blocks.  */
3104   FOR_EACH_BB_FN (bb, cfun)
3105     {
3106       rtx_insn *insn, *next;
3107 
3108       for (insn = NEXT_INSN (BB_END (bb));
3109 	   insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
3110 	   insn = next)
3111 	{
3112 	  next = NEXT_INSN (insn);
3113 	  if (LABEL_P (insn)
3114 	      && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
3115 	      && JUMP_TABLE_DATA_P (next))
3116 	    {
3117 	      rtx_insn *label = insn, *jump = next;
3118 
3119 	      if (dump_file)
3120 		fprintf (dump_file, "Dead jumptable %i removed\n",
3121 			 INSN_UID (insn));
3122 
3123 	      next = NEXT_INSN (next);
3124 	      delete_insn (jump);
3125 	      delete_insn (label);
3126 	    }
3127 	}
3128     }
3129 }
3130 
3131 
3132 /* Tidy the CFG by deleting unreachable code and whatnot.  */
3133 
3134 bool
3135 cleanup_cfg (int mode)
3136 {
3137   bool changed = false;
3138 
3139   /* Set the cfglayout mode flag here.  We could update all the callers
3140      but that is just inconvenient, especially given that we eventually
3141      want to have cfglayout mode as the default.  */
3142   if (current_ir_type () == IR_RTL_CFGLAYOUT)
3143     mode |= CLEANUP_CFGLAYOUT;
3144 
3145   timevar_push (TV_CLEANUP_CFG);
3146   if (delete_unreachable_blocks ())
3147     {
3148       changed = true;
3149       /* We've possibly created trivially dead code.  Clean it up right
3150 	 now to introduce more opportunities for try_optimize_cfg.  */
3151       if (!(mode & (CLEANUP_NO_INSN_DEL))
3152 	  && !reload_completed)
3153 	delete_trivially_dead_insns (get_insns (), max_reg_num ());
3154     }
3155 
3156   compact_blocks ();
3157 
3158   /* To tail-merge blocks ending in the same noreturn function (e.g.
3159      a call to abort) we have to insert fake edges to exit.  Do this
3160      here once.  The fake edges do not interfere with any other CFG
3161      cleanups.  */
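  /* E.g. (sketch): two blocks that both end in a call to abort () have
     no successors; the fake edges give them a common (EXIT) successor,
     which lets try_crossjump_bb merge their identical tails.  */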
3162   if (mode & CLEANUP_CROSSJUMP)
3163     add_noreturn_fake_exit_edges ();
3164 
3165   if (!dbg_cnt (cfg_cleanup))
3166     return changed;
3167 
3168   while (try_optimize_cfg (mode))
3169     {
3170       delete_unreachable_blocks (), changed = true;
3171       if (!(mode & CLEANUP_NO_INSN_DEL))
3172 	{
3173 	  /* Try to remove some trivially dead insns when doing an expensive
3174 	     cleanup.  But delete_trivially_dead_insns doesn't work after
3175 	     reload (it only handles pseudos) and run_fast_dce is too costly
3176 	     to run in every iteration.
3177 
3178 	     For effective cross jumping, we really want to run a fast DCE to
3179 	     clean up any dead conditions, or they get in the way of performing
3180 	     useful tail merges.
3181 
3182 	     Other transformations in cleanup_cfg are not so sensitive to dead
3183 	     code, so delete_trivially_dead_insns or even doing nothing at all
3184 	     is good enough.  */
3185 	  if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
3186 	      && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
3187 	    break;
3188 	  if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occurred)
3189 	    run_fast_dce ();
3190 	}
3191       else
3192 	break;
3193     }
3194 
3195   if (mode & CLEANUP_CROSSJUMP)
3196     remove_fake_exit_edges ();
3197 
3198   /* Don't call delete_dead_jumptables in cfglayout mode, because
3199      that function assumes that jump tables are in the insns stream.
3200      But we also don't _have_ to delete dead jumptables in cfglayout
3201      mode because we shouldn't even be looking at things that are
3202      not in a basic block.  Dead jumptables are cleaned up when
3203      going out of cfglayout mode.  */
3204   if (!(mode & CLEANUP_CFGLAYOUT))
3205     delete_dead_jumptables ();
3206 
3207   /* ???  We probably do this way too often.  */
3208   if (current_loops
3209       && (changed
3210 	  || (mode & CLEANUP_CFG_CHANGED)))
3211     {
3212       timevar_push (TV_REPAIR_LOOPS);
3213       /* The above doesn't preserve dominance info if available.  */
3214       gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
3215       calculate_dominance_info (CDI_DOMINATORS);
3216       fix_loop_structure (NULL);
3217       free_dominance_info (CDI_DOMINATORS);
3218       timevar_pop (TV_REPAIR_LOOPS);
3219     }
3220 
3221   timevar_pop (TV_CLEANUP_CFG);
3222 
3223   return changed;
3224 }
3225 
3226 namespace {
3227 
3228 const pass_data pass_data_jump =
3229 {
3230   RTL_PASS, /* type */
3231   "jump", /* name */
3232   OPTGROUP_NONE, /* optinfo_flags */
3233   TV_JUMP, /* tv_id */
3234   0, /* properties_required */
3235   0, /* properties_provided */
3236   0, /* properties_destroyed */
3237   0, /* todo_flags_start */
3238   0, /* todo_flags_finish */
3239 };
3240 
3241 class pass_jump : public rtl_opt_pass
3242 {
3243 public:
3244   pass_jump (gcc::context *ctxt)
3245     : rtl_opt_pass (pass_data_jump, ctxt)
3246   {}
3247 
3248   /* opt_pass methods: */
3249   virtual unsigned int execute (function *);
3250 
3251 }; // class pass_jump
3252 
3253 unsigned int
3254 pass_jump::execute (function *)
3255 {
3256   delete_trivially_dead_insns (get_insns (), max_reg_num ());
3257   if (dump_file)
3258     dump_flow_info (dump_file, dump_flags);
3259   cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
3260 	       | (flag_thread_jumps ? CLEANUP_THREADING : 0));
3261   return 0;
3262 }
3263 
3264 } // anon namespace
3265 
3266 rtl_opt_pass *
3267 make_pass_jump (gcc::context *ctxt)
3268 {
3269   return new pass_jump (ctxt);
3270 }
3271 
3272 namespace {
3273 
3274 const pass_data pass_data_jump2 =
3275 {
3276   RTL_PASS, /* type */
3277   "jump2", /* name */
3278   OPTGROUP_NONE, /* optinfo_flags */
3279   TV_JUMP, /* tv_id */
3280   0, /* properties_required */
3281   0, /* properties_provided */
3282   0, /* properties_destroyed */
3283   0, /* todo_flags_start */
3284   0, /* todo_flags_finish */
3285 };
3286 
3287 class pass_jump2 : public rtl_opt_pass
3288 {
3289 public:
3290   pass_jump2 (gcc::context *ctxt)
3291     : rtl_opt_pass (pass_data_jump2, ctxt)
3292   {}
3293 
3294   /* opt_pass methods: */
3295   virtual unsigned int execute (function *)
3296     {
3297       cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
3298       return 0;
3299     }
3300 
3301 }; // class pass_jump2
3302 
3303 } // anon namespace
3304 
3305 rtl_opt_pass *
3306 make_pass_jump2 (gcc::context *ctxt)
3307 {
3308   return new pass_jump2 (ctxt);
3309 }
3310