1 /* Control flow optimization code for GNU compiler.
2    Copyright (C) 1987-2019 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 /* This file contains the control flow optimizer.  The main entry point is
21    cleanup_cfg.  The following optimizations are performed:
22 
23    - Unreachable block removal
24    - Edge forwarding (an edge to a forwarder block is forwarded to its
25      successor.  Simplification of the branch instruction is performed by
26      the underlying infrastructure, so the branch can be converted to a
27      simplejump or eliminated).
28    - Cross jumping (tail merging)
29    - Conditional jump-around-simplejump simplification
30    - Basic block merging.  */
31 
32 #include "config.h"
33 #include "system.h"
34 #include "coretypes.h"
35 #include "backend.h"
36 #include "target.h"
37 #include "rtl.h"
38 #include "tree.h"
39 #include "cfghooks.h"
40 #include "df.h"
41 #include "memmodel.h"
42 #include "tm_p.h"
43 #include "insn-config.h"
44 #include "emit-rtl.h"
45 #include "cselib.h"
46 #include "params.h"
47 #include "tree-pass.h"
48 #include "cfgloop.h"
49 #include "cfgrtl.h"
50 #include "cfganal.h"
51 #include "cfgbuild.h"
52 #include "cfgcleanup.h"
53 #include "dce.h"
54 #include "dbgcnt.h"
55 #include "rtl-iter.h"
56 
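/* Test the cached forwarder status of BB; the BB_FORWARDER_BLOCK flag is
   kept up to date by notice_new_block and update_forwarder_flag below.  */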
57 #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
58 
59 /* Set to true when we are running the first pass of the try_optimize_cfg loop.  */
60 static bool first_pass;
61 
62 /* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
63 static bool crossjumps_occurred;
64 
65 /* Set to true if we couldn't run an optimization due to stale liveness
66    information; we should run df_analyze to enable more opportunities.  */
67 static bool block_was_dirty;
68 
69 static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
70 static bool try_crossjump_bb (int, basic_block);
71 static bool outgoing_edges_match (int, basic_block, basic_block);
72 static enum replace_direction old_insns_match_p (int, rtx_insn *, rtx_insn *);
73 
74 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
75 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
76 static bool try_optimize_cfg (int);
77 static bool try_simplify_condjump (basic_block);
78 static bool try_forward_edges (int, basic_block);
79 static edge thread_jump (edge, basic_block);
80 static bool mark_effect (rtx, bitmap);
81 static void notice_new_block (basic_block);
82 static void update_forwarder_flag (basic_block);
83 static void merge_memattrs (rtx, rtx);
84 
85 /* Set flags for a newly created block.  */
86 
87 static void
88 notice_new_block (basic_block bb)
89 {
90   if (!bb)
91     return;
92 
93   if (forwarder_block_p (bb))
94     bb->flags |= BB_FORWARDER_BLOCK;
95 }
96 
97 /* Recompute the forwarder flag after the block has been modified.  */
98 
99 static void
100 update_forwarder_flag (basic_block bb)
101 {
102   if (forwarder_block_p (bb))
103     bb->flags |= BB_FORWARDER_BLOCK;
104   else
105     bb->flags &= ~BB_FORWARDER_BLOCK;
106 }
107 
108 /* Simplify a conditional jump around an unconditional jump.
109    Return true if something changed.  */
110 
111 static bool
112 try_simplify_condjump (basic_block cbranch_block)
113 {
114   basic_block jump_block, jump_dest_block, cbranch_dest_block;
115   edge cbranch_jump_edge, cbranch_fallthru_edge;
116   rtx_insn *cbranch_insn;
117 
118   /* Verify that there are exactly two successors.  */
119   if (EDGE_COUNT (cbranch_block->succs) != 2)
120     return false;
121 
122   /* Verify that we've got a normal conditional branch at the end
123      of the block.  */
124   cbranch_insn = BB_END (cbranch_block);
125   if (!any_condjump_p (cbranch_insn))
126     return false;
127 
128   cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
129   cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
130 
131   /* The next block must not have multiple predecessors, must not
132      be the last block in the function, and must contain just the
133      unconditional jump.  */
134   jump_block = cbranch_fallthru_edge->dest;
135   if (!single_pred_p (jump_block)
136       || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
137       || !FORWARDER_BLOCK_P (jump_block))
138     return false;
139   jump_dest_block = single_succ (jump_block);
140 
141   /* If we are partitioning hot/cold basic blocks, we don't want to
142      mess up unconditional or indirect jumps that cross between hot
143      and cold sections.
144 
145      Basic block partitioning may result in some jumps that appear to
146      be optimizable (or blocks that appear to be mergeable), but which really
147      must be left untouched (they are required to make it safely across
148      partition boundaries).  See the comments at the top of
149      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
150 
151   if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
152       || (cbranch_jump_edge->flags & EDGE_CROSSING))
153     return false;
154 
155   /* The conditional branch must target the block after the
156      unconditional branch.  */
157   cbranch_dest_block = cbranch_jump_edge->dest;
158 
159   if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
160       || jump_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
161       || !can_fallthru (jump_block, cbranch_dest_block))
162     return false;
163 
164   /* Invert the conditional branch.  */
165   if (!invert_jump (as_a <rtx_jump_insn *> (cbranch_insn),
166 		    block_label (jump_dest_block), 0))
167     return false;
168 
169   if (dump_file)
170     fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
171 	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
172 
173   /* Success.  Update the CFG to match.  Note that after this point
174      the edge variable names appear backwards; the redirection is done
175      this way to preserve edge profile data.  */
176   cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
177 						cbranch_dest_block);
178   cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
179 						    jump_dest_block);
180   cbranch_jump_edge->flags |= EDGE_FALLTHRU;
181   cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
182   update_br_prob_note (cbranch_block);
183 
184   /* Delete the block with the unconditional jump, and clean up the mess.  */
185   delete_basic_block (jump_block);
186   tidy_fallthru_edge (cbranch_jump_edge);
187   update_forwarder_flag (cbranch_block);
188 
189   return true;
190 }
191 
192 /* Attempt to prove that the operation is a NOOP using cselib, or mark the
193    effect on the register.  Used by jump threading.  */
194 
195 static bool
196 mark_effect (rtx exp, regset nonequal)
197 {
198   rtx dest;
199   switch (GET_CODE (exp))
200     {
201       /* In case we do clobber the register, mark it as equal, as we know the
202 	 value is dead so it doesn't have to match.  */
203     case CLOBBER:
204       dest = XEXP (exp, 0);
205       if (REG_P (dest))
206 	bitmap_clear_range (nonequal, REGNO (dest), REG_NREGS (dest));
207       return false;
208 
209     case SET:
210       if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
211 	return false;
212       dest = SET_DEST (exp);
213       if (dest == pc_rtx)
214 	return false;
215       if (!REG_P (dest))
216 	return true;
217       bitmap_set_range (nonequal, REGNO (dest), REG_NREGS (dest));
218       return false;
219 
220     default:
221       return false;
222     }
223 }
224 
225 /* Return true if X contains a register in NONEQUAL.  */
226 static bool
227 mentions_nonequal_regs (const_rtx x, regset nonequal)
228 {
229   subrtx_iterator::array_type array;
230   FOR_EACH_SUBRTX (iter, array, x, NONCONST)
231     {
232       const_rtx x = *iter;
233       if (REG_P (x))
234 	{
235 	  unsigned int end_regno = END_REGNO (x);
236 	  for (unsigned int regno = REGNO (x); regno < end_regno; ++regno)
237 	    if (REGNO_REG_SET_P (nonequal, regno))
238 	      return true;
239 	}
240     }
241   return false;
242 }
243 
244 /* Attempt to prove that the basic block B will have no side effects and
245    will always continue along the same edge if reached via E.  Return that
246    edge if it exists, NULL otherwise.  */
247 
248 static edge
249 thread_jump (edge e, basic_block b)
250 {
251   rtx set1, set2, cond1, cond2;
252   rtx_insn *insn;
253   enum rtx_code code1, code2, reversed_code2;
254   bool reverse1 = false;
255   unsigned i;
256   regset nonequal;
257   bool failed = false;
258   reg_set_iterator rsi;
259 
260   if (b->flags & BB_NONTHREADABLE_BLOCK)
261     return NULL;
262 
263   /* At the moment we handle only conditional jumps, but later we may
264      want to extend this code to tablejumps and others.  */
265   if (EDGE_COUNT (e->src->succs) != 2)
266     return NULL;
267   if (EDGE_COUNT (b->succs) != 2)
268     {
269       b->flags |= BB_NONTHREADABLE_BLOCK;
270       return NULL;
271     }
272 
273   /* Second branch must end with onlyjump, as we will eliminate the jump.  */
274   if (!any_condjump_p (BB_END (e->src)))
275     return NULL;
276 
277   if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
278     {
279       b->flags |= BB_NONTHREADABLE_BLOCK;
280       return NULL;
281     }
282 
283   set1 = pc_set (BB_END (e->src));
284   set2 = pc_set (BB_END (b));
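  /* CODE1 below should be the comparison that is known to hold when control
     passes along E; if E is taken when the condition of the jump in E->src
     is false, the comparison must be reversed.  */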
285   if (((e->flags & EDGE_FALLTHRU) != 0)
286       != (XEXP (SET_SRC (set1), 1) == pc_rtx))
287     reverse1 = true;
288 
289   cond1 = XEXP (SET_SRC (set1), 0);
290   cond2 = XEXP (SET_SRC (set2), 0);
291   if (reverse1)
292     code1 = reversed_comparison_code (cond1, BB_END (e->src));
293   else
294     code1 = GET_CODE (cond1);
295 
296   code2 = GET_CODE (cond2);
297   reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
298 
299   if (!comparison_dominates_p (code1, code2)
300       && !comparison_dominates_p (code1, reversed_code2))
301     return NULL;
302 
303   /* Ensure that the comparison operators are equivalent.
304      ??? This is far too pessimistic.  We should allow swapped operands,
305      different CCmodes, or for example interval comparisons that
306      dominate even when the operands are not equivalent.  */
307   if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
308       || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
309     return NULL;
310 
311   /* Punt if BB_END (e->src) is a doloop-like conditional jump that modifies
312      the registers used in cond1.  */
313   if (modified_in_p (cond1, BB_END (e->src)))
314     return NULL;
315 
316   /* Short circuit cases where block B contains some side effects, as we can't
317      safely bypass it.  */
318   for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
319        insn = NEXT_INSN (insn))
320     if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
321       {
322 	b->flags |= BB_NONTHREADABLE_BLOCK;
323 	return NULL;
324       }
325 
326   cselib_init (0);
327 
328   /* First process all values computed in the source basic block.  */
329   for (insn = NEXT_INSN (BB_HEAD (e->src));
330        insn != NEXT_INSN (BB_END (e->src));
331        insn = NEXT_INSN (insn))
332     if (INSN_P (insn))
333       cselib_process_insn (insn);
334 
335   nonequal = BITMAP_ALLOC (NULL);
336   CLEAR_REG_SET (nonequal);
337 
338   /* Now assume that we've continued along the edge E to B and continue
339      processing as if it were the same basic block.
340      Our goal is to prove that the whole block is a NOOP.  */
341 
342   for (insn = NEXT_INSN (BB_HEAD (b));
343        insn != NEXT_INSN (BB_END (b)) && !failed;
344        insn = NEXT_INSN (insn))
345     {
346       /* cond2 must not mention any register that is not equal to its
347 	 value in the former block.  Check this before processing that
348 	 instruction, as BB_END (b) could also contain clobbers.  */
349       if (insn == BB_END (b)
350 	  && mentions_nonequal_regs (cond2, nonequal))
351 	goto failed_exit;
352 
353       if (INSN_P (insn))
354 	{
355 	  rtx pat = PATTERN (insn);
356 
357 	  if (GET_CODE (pat) == PARALLEL)
358 	    {
359 	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
360 		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
361 	    }
362 	  else
363 	    failed |= mark_effect (pat, nonequal);
364 	}
365 
366       cselib_process_insn (insn);
367     }
368 
369   /* Later we should clear nonequal of dead registers.  So far we don't
370      have liveness information in cfg_cleanup.  */
371   if (failed)
372     {
373       b->flags |= BB_NONTHREADABLE_BLOCK;
374       goto failed_exit;
375     }
376 
377   EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
378     goto failed_exit;
379 
380   BITMAP_FREE (nonequal);
381   cselib_finish ();
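  /* The condition known along E implies either B's condition or its
     reverse (checked above); return the outgoing edge of B that is
     taken under the implied outcome.  */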
382   if ((comparison_dominates_p (code1, code2) != 0)
383       != (XEXP (SET_SRC (set2), 1) == pc_rtx))
384     return BRANCH_EDGE (b);
385   else
386     return FALLTHRU_EDGE (b);
387 
388 failed_exit:
389   BITMAP_FREE (nonequal);
390   cselib_finish ();
391   return NULL;
392 }
393 
394 /* Attempt to forward edges leaving basic block B.
395    Return true if successful.  */
396 
397 static bool
398 try_forward_edges (int mode, basic_block b)
399 {
400   bool changed = false;
401   edge_iterator ei;
402   edge e, *threaded_edges = NULL;
403 
404   for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
405     {
406       basic_block target, first;
407       location_t goto_locus;
408       int counter;
409       bool threaded = false;
410       int nthreaded_edges = 0;
411       bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;
412       bool new_target_threaded = false;
413 
414       /* Skip complex edges because we don't know how to update them.
415 
416 	 Still handle fallthru edges, as we may succeed in forwarding the
417 	 fallthru edge to the same place as the branch edge of a conditional
418 	 branch and turn the conditional branch into an unconditional one.  */
419       if (e->flags & EDGE_COMPLEX)
420 	{
421 	  ei_next (&ei);
422 	  continue;
423 	}
424 
425       target = first = e->dest;
426       counter = NUM_FIXED_BLOCKS;
427       goto_locus = e->goto_locus;
428 
429       while (counter < n_basic_blocks_for_fn (cfun))
430 	{
431 	  basic_block new_target = NULL;
432 	  may_thread |= (target->flags & BB_MODIFIED) != 0;
433 
434 	  if (FORWARDER_BLOCK_P (target)
435 	      && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun))
436 	    {
437 	      /* Bypass trivial infinite loops.  */
438 	      new_target = single_succ (target);
439 	      if (target == new_target)
440 		counter = n_basic_blocks_for_fn (cfun);
441 	      else if (!optimize)
442 		{
443 		  /* When not optimizing, ensure that edges or forwarder
444 		     blocks with different locus are not optimized out.  */
445 		  location_t new_locus = single_succ_edge (target)->goto_locus;
446 		  location_t locus = goto_locus;
447 
448 		  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
449 		      && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
450 		      && new_locus != locus)
451 		    new_target = NULL;
452 		  else
453 		    {
454 		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
455 			locus = new_locus;
456 
457 		      rtx_insn *last = BB_END (target);
458 		      if (DEBUG_INSN_P (last))
459 			last = prev_nondebug_insn (last);
460 		      if (last && INSN_P (last))
461 			new_locus = INSN_LOCATION (last);
462 		      else
463 			new_locus = UNKNOWN_LOCATION;
464 
465 		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
466 			  && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
467 			  && new_locus != locus)
468 			new_target = NULL;
469 		      else
470 			{
471 			  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
472 			    locus = new_locus;
473 
474 			  goto_locus = locus;
475 			}
476 		    }
477 		}
478 	    }
479 
480 	  /* Allow threading over only one edge at a time to simplify updating
481 	     of probabilities.  */
482 	  else if ((mode & CLEANUP_THREADING) && may_thread)
483 	    {
484 	      edge t = thread_jump (e, target);
485 	      if (t)
486 		{
487 		  if (!threaded_edges)
488 		    threaded_edges = XNEWVEC (edge,
489 					      n_basic_blocks_for_fn (cfun));
490 		  else
491 		    {
492 		      int i;
493 
494 		      /* Detect an infinite loop across blocks not
495 			 including the start block.  */
496 		      for (i = 0; i < nthreaded_edges; ++i)
497 			if (threaded_edges[i] == t)
498 			  break;
499 		      if (i < nthreaded_edges)
500 			{
501 			  counter = n_basic_blocks_for_fn (cfun);
502 			  break;
503 			}
504 		    }
505 
506 		  /* Detect an infinite loop across the start block.  */
507 		  if (t->dest == b)
508 		    break;
509 
510 		  gcc_assert (nthreaded_edges
511 			      < (n_basic_blocks_for_fn (cfun)
512 				 - NUM_FIXED_BLOCKS));
513 		  threaded_edges[nthreaded_edges++] = t;
514 
515 		  new_target = t->dest;
516 		  new_target_threaded = true;
517 		}
518 	    }
519 
520 	  if (!new_target)
521 	    break;
522 
523 	  counter++;
524 	  /* Do not turn a non-crossing jump into a crossing one.  Depending
525 	     on the target it may require a different instruction pattern.  */
526 	  if ((e->flags & EDGE_CROSSING)
527 	      || BB_PARTITION (first) == BB_PARTITION (new_target))
528 	    {
529 	      target = new_target;
530 	      threaded |= new_target_threaded;
531 	    }
532 	}
533 
534       if (counter >= n_basic_blocks_for_fn (cfun))
535 	{
536 	  if (dump_file)
537 	    fprintf (dump_file, "Infinite loop in BB %i.\n",
538 		     target->index);
539 	}
540       else if (target == first)
541 	; /* We didn't do anything.  */
542       else
543 	{
544 	  /* Save the values now, as the edge may get removed.  */
545 	  profile_count edge_count = e->count ();
546 	  int n = 0;
547 
548 	  e->goto_locus = goto_locus;
549 
550 	  /* Don't force if target is exit block.  */
551 	  if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun))
552 	    {
553 	      notice_new_block (redirect_edge_and_branch_force (e, target));
554 	      if (dump_file)
555 		fprintf (dump_file, "Conditionals threaded.\n");
556 	    }
557 	  else if (!redirect_edge_and_branch (e, target))
558 	    {
559 	      if (dump_file)
560 		fprintf (dump_file,
561 			 "Forwarding edge %i->%i to %i failed.\n",
562 			 b->index, e->dest->index, target->index);
563 	      ei_next (&ei);
564 	      continue;
565 	    }
566 
567 	  /* We successfully forwarded the edge.  Now update profile
568 	     data: for each edge we traversed in the chain, remove
569 	     the original edge's execution count.  */
570 	  do
571 	    {
572 	      edge t;
573 
574 	      if (!single_succ_p (first))
575 		{
576 		  gcc_assert (n < nthreaded_edges);
577 		  t = threaded_edges [n++];
578 		  gcc_assert (t->src == first);
579 		  update_bb_profile_for_threading (first, edge_count, t);
580 		  update_br_prob_note (first);
581 		}
582 	      else
583 		{
584 		  first->count -= edge_count;
585 		  /* It is possible that as the result of
586 		     threading we've removed the edge, as it is
587 		     threaded to the fallthru edge.  Avoid
588 		     getting out of sync.  */
589 		  if (n < nthreaded_edges
590 		      && first == threaded_edges [n]->src)
591 		    n++;
592 		  t = single_succ_edge (first);
593 		}
594 
595 	      first = t->dest;
596 	    }
597 	  while (first != target);
598 
599 	  changed = true;
600 	  continue;
601 	}
602       ei_next (&ei);
603     }
604 
605   free (threaded_edges);
606   return changed;
607 }
608 
609 
610 /* Blocks A and B are to be merged into a single block.  A has no incoming
611    fallthru edge, so it can be moved before B without adding or modifying
612    any jumps (aside from the jump from A to B).  */
613 
614 static void
615 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
616 {
617   rtx_insn *barrier;
618 
619   /* If we are partitioning hot/cold basic blocks, we don't want to
620      mess up unconditional or indirect jumps that cross between hot
621      and cold sections.
622 
623      Basic block partitioning may result in some jumps that appear to
624      be optimizable (or blocks that appear to be mergeable), but which really
625      must be left untouched (they are required to make it safely across
626      partition boundaries).  See the comments at the top of
627      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
628 
629   if (BB_PARTITION (a) != BB_PARTITION (b))
630     return;
631 
632   barrier = next_nonnote_insn (BB_END (a));
633   gcc_assert (BARRIER_P (barrier));
634   delete_insn (barrier);
635 
636   /* Scramble the insn chain.  */
637   if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
638     reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
639   df_set_bb_dirty (a);
640 
641   if (dump_file)
642     fprintf (dump_file, "Moved block %d before %d and merged.\n",
643 	     a->index, b->index);
644 
645   /* Swap the records for the two blocks around.  */
646 
647   unlink_block (a);
648   link_block (a, b->prev_bb);
649 
650   /* Now blocks A and B are contiguous.  Merge them.  */
651   merge_blocks (a, b);
652 }
653 
654 /* Blocks A and B are to be merged into a single block.  B has no outgoing
655    fallthru edge, so it can be moved after A without adding or modifying
656    any jumps (aside from the jump from A to B).  */
657 
658 static void
659 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
660 {
661   rtx_insn *barrier, *real_b_end;
662   rtx_insn *label;
663   rtx_jump_table_data *table;
664 
665   /* If we are partitioning hot/cold basic blocks, we don't want to
666      mess up unconditional or indirect jumps that cross between hot
667      and cold sections.
668 
669      Basic block partitioning may result in some jumps that appear to
670      be optimizable (or blocks that appear to be mergeable), but which really
671      must be left untouched (they are required to make it safely across
672      partition boundaries).  See the comments at the top of
673      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
674 
675   if (BB_PARTITION (a) != BB_PARTITION (b))
676     return;
677 
678   real_b_end = BB_END (b);
679 
680   /* If there is a jump table following block B, temporarily add the jump
681      table to block B so that it will also be moved to the correct location.  */
682   if (tablejump_p (BB_END (b), &label, &table)
683       && prev_active_insn (label) == BB_END (b))
684     {
685       BB_END (b) = table;
686     }
687 
688   /* There had better have been a barrier there.  Delete it.  */
689   barrier = NEXT_INSN (BB_END (b));
690   if (barrier && BARRIER_P (barrier))
691     delete_insn (barrier);
692 
693 
694   /* Scramble the insn chain.  */
695   reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
696 
697   /* Restore the real end of b.  */
698   BB_END (b) = real_b_end;
699 
700   if (dump_file)
701     fprintf (dump_file, "Moved block %d after %d and merged.\n",
702 	     b->index, a->index);
703 
704   /* Now blocks A and B are contiguous.  Merge them.  */
705   merge_blocks (a, b);
706 }
707 
708 /* Attempt to merge basic blocks that are potentially non-adjacent.
709    Return NULL iff the attempt failed, otherwise return the basic block
710    where cleanup_cfg should continue.  Because the merging commonly
711    moves a basic block away or introduces another optimization
712    possibility, return the basic block just before B so cleanup_cfg
713    doesn't need to iterate.
714 
715    It may be a good idea to return the basic block before C in the case
716    C has been moved after B and originally appeared earlier in the
717    insn sequence, but we have no information available about the
718    relative ordering of these two.  Hopefully it is not too common.  */
719 
720 static basic_block
721 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
722 {
723   basic_block next;
724 
725   /* If we are partitioning hot/cold basic blocks, we don't want to
726      mess up unconditional or indirect jumps that cross between hot
727      and cold sections.
728 
729      Basic block partitioning may result in some jumps that appear to
730      be optimizable (or blocks that appear to be mergeable), but which really
731      must be left untouched (they are required to make it safely across
732      partition boundaries).  See the comments at the top of
733      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
734 
735   if (BB_PARTITION (b) != BB_PARTITION (c))
736     return NULL;
737 
738   /* If B has a fallthru edge to C, no need to move anything.  */
739   if (e->flags & EDGE_FALLTHRU)
740     {
741       int b_index = b->index, c_index = c->index;
742 
743       /* Protect the loop latches.  */
744       if (current_loops && c->loop_father->latch == c)
745 	return NULL;
746 
747       merge_blocks (b, c);
748       update_forwarder_flag (b);
749 
750       if (dump_file)
751 	fprintf (dump_file, "Merged %d and %d without moving.\n",
752 		 b_index, c_index);
753 
754       return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb;
755     }
756 
757   /* Otherwise we will need to move code around.  Do that only if expensive
758      transformations are allowed.  */
759   else if (mode & CLEANUP_EXPENSIVE)
760     {
761       edge tmp_edge, b_fallthru_edge;
762       bool c_has_outgoing_fallthru;
763       bool b_has_incoming_fallthru;
764 
765       /* Avoid overactive code motion, as the forwarder blocks should be
766 	 eliminated by edge redirection instead.  One exception might have
767 	 been if B is a forwarder block and C has no fallthru edge, but
768 	 that should be cleaned up by bb-reorder instead.  */
769       if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
770 	return NULL;
771 
772       /* We must make sure to not munge nesting of lexical blocks,
773 	 and loop notes.  This is done by squeezing out all the notes
774 	 and leaving them there to lie.  Not ideal, but functional.  */
775 
776       tmp_edge = find_fallthru_edge (c->succs);
777       c_has_outgoing_fallthru = (tmp_edge != NULL);
778 
779       tmp_edge = find_fallthru_edge (b->preds);
780       b_has_incoming_fallthru = (tmp_edge != NULL);
781       b_fallthru_edge = tmp_edge;
782       next = b->prev_bb;
783       if (next == c)
784 	next = next->prev_bb;
785 
786       /* Otherwise, we're going to try to move C after B.  If C does
787 	 not have an outgoing fallthru, then it can be moved
788 	 immediately after B without introducing or modifying jumps.  */
789       if (! c_has_outgoing_fallthru)
790 	{
791 	  merge_blocks_move_successor_nojumps (b, c);
792 	  return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
793 	}
794 
795       /* If B does not have an incoming fallthru, then it can be moved
796 	 immediately before C without introducing or modifying jumps.
797 	 C cannot be the first block, so we do not have to worry about
798 	 accessing a non-existent block.  */
799 
800       if (b_has_incoming_fallthru)
801 	{
802 	  basic_block bb;
803 
804 	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
805 	    return NULL;
806 	  bb = force_nonfallthru (b_fallthru_edge);
807 	  if (bb)
808 	    notice_new_block (bb);
809 	}
810 
811       merge_blocks_move_predecessor_nojumps (b, c);
812       return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
813     }
814 
815   return NULL;
816 }
817 
818 
819 /* Remove the memory attributes from the MEM expressions in X and Y
820    wherever they are not equal.  */
821 
822 static void
823 merge_memattrs (rtx x, rtx y)
824 {
825   int i;
826   int j;
827   enum rtx_code code;
828   const char *fmt;
829 
830   if (x == y)
831     return;
832   if (x == 0 || y == 0)
833     return;
834 
835   code = GET_CODE (x);
836 
837   if (code != GET_CODE (y))
838     return;
839 
840   if (GET_MODE (x) != GET_MODE (y))
841     return;
842 
843   if (code == MEM && !mem_attrs_eq_p (MEM_ATTRS (x), MEM_ATTRS (y)))
844     {
845       if (! MEM_ATTRS (x))
846 	MEM_ATTRS (y) = 0;
847       else if (! MEM_ATTRS (y))
848 	MEM_ATTRS (x) = 0;
849       else
850 	{
851 	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
852 	    {
853 	      set_mem_alias_set (x, 0);
854 	      set_mem_alias_set (y, 0);
855 	    }
856 
857 	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
858 	    {
859 	      set_mem_expr (x, 0);
860 	      set_mem_expr (y, 0);
861 	      clear_mem_offset (x);
862 	      clear_mem_offset (y);
863 	    }
864 	  else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
865 		   || (MEM_OFFSET_KNOWN_P (x)
866 		       && maybe_ne (MEM_OFFSET (x), MEM_OFFSET (y))))
867 	    {
868 	      clear_mem_offset (x);
869 	      clear_mem_offset (y);
870 	    }
871 
872 	  if (!MEM_SIZE_KNOWN_P (x))
873 	    clear_mem_size (y);
874 	  else if (!MEM_SIZE_KNOWN_P (y))
875 	    clear_mem_size (x);
876 	  else if (known_le (MEM_SIZE (x), MEM_SIZE (y)))
877 	    set_mem_size (x, MEM_SIZE (y));
878 	  else if (known_le (MEM_SIZE (y), MEM_SIZE (x)))
879 	    set_mem_size (y, MEM_SIZE (x));
880 	  else
881 	    {
882 	      /* The sizes aren't ordered, so we can't merge them.  */
883 	      clear_mem_size (x);
884 	      clear_mem_size (y);
885 	    }
886 
887 	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
888 	  set_mem_align (y, MEM_ALIGN (x));
889 	}
890     }
891   if (code == MEM)
892     {
893       if (MEM_READONLY_P (x) != MEM_READONLY_P (y))
894 	{
895 	  MEM_READONLY_P (x) = 0;
896 	  MEM_READONLY_P (y) = 0;
897 	}
898       if (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y))
899 	{
900 	  MEM_NOTRAP_P (x) = 0;
901 	  MEM_NOTRAP_P (y) = 0;
902 	}
903       if (MEM_VOLATILE_P (x) != MEM_VOLATILE_P (y))
904 	{
905 	  MEM_VOLATILE_P (x) = 1;
906 	  MEM_VOLATILE_P (y) = 1;
907 	}
908     }
909 
910   fmt = GET_RTX_FORMAT (code);
911   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
912     {
913       switch (fmt[i])
914 	{
915 	case 'E':
916 	  /* Two vectors must have the same length.  */
917 	  if (XVECLEN (x, i) != XVECLEN (y, i))
918 	    return;
919 
920 	  for (j = 0; j < XVECLEN (x, i); j++)
921 	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
922 
923 	  break;
924 
925 	case 'e':
926 	  merge_memattrs (XEXP (x, i), XEXP (y, i));
927 	}
928     }
929   return;
930 }
931 
932 
933  /* Check whether patterns P1 and P2 are equivalent, apart from the possibly
934     different single sets S1 and S2.  */
935 
936 static bool
937 equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
938 {
939   int i;
940   rtx e1, e2;
941 
942   if (p1 == s1 && p2 == s2)
943     return true;
944 
945   if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
946     return false;
947 
948   if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
949     return false;
950 
951   for (i = 0; i < XVECLEN (p1, 0); i++)
952     {
953       e1 = XVECEXP (p1, 0, i);
954       e2 = XVECEXP (p2, 0, i);
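      /* The single sets themselves may differ; the caller checks them
	 separately.  */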
955       if (e1 == s1 && e2 == s2)
956         continue;
957       if (reload_completed
958           ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
959         continue;
960 
961       return false;
962     }
963 
964   return true;
965 }
966 
967 
968 /* NOTE1 is the REG_EQUAL note, if any, attached to an insn
969    that is a single_set with a SET_SRC of SRC1.  Similarly
970    for NOTE2/SRC2.
971 
972    So effectively NOTE1/NOTE2 are an alternate form of
973    SRC1/SRC2 respectively.
974 
975    Return nonzero if SRC1 or NOTE1 has the same constant
976    integer value as SRC2 or NOTE2.   Else return zero.  */
977 static int
978 values_equal_p (rtx note1, rtx note2, rtx src1, rtx src2)
979 {
980   if (note1
981       && note2
982       && CONST_INT_P (XEXP (note1, 0))
983       && rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0)))
984     return 1;
985 
986   if (!note1
987       && !note2
988       && CONST_INT_P (src1)
989       && CONST_INT_P (src2)
990       && rtx_equal_p (src1, src2))
991     return 1;
992 
993   if (note1
994       && CONST_INT_P (src2)
995       && rtx_equal_p (XEXP (note1, 0), src2))
996     return 1;
997 
998   if (note2
999       && CONST_INT_P (src1)
1000       && rtx_equal_p (XEXP (note2, 0), src1))
1001     return 1;
1002 
1003   return 0;
1004 }
1005 
1006 /* Examine register notes on I1 and I2 and return:
1007    - dir_forward if I1 can be replaced by I2, or
1008    - dir_backward if I2 can be replaced by I1, or
1009    - dir_both if both are the case.  */
1010 
1011 static enum replace_direction
1012 can_replace_by (rtx_insn *i1, rtx_insn *i2)
1013 {
1014   rtx s1, s2, d1, d2, src1, src2, note1, note2;
1015   bool c1, c2;
1016 
1017   /* Check for 2 sets.  */
1018   s1 = single_set (i1);
1019   s2 = single_set (i2);
1020   if (s1 == NULL_RTX || s2 == NULL_RTX)
1021     return dir_none;
1022 
1023   /* Check that the 2 sets set the same dest.  */
1024   d1 = SET_DEST (s1);
1025   d2 = SET_DEST (s2);
1026   if (!(reload_completed
1027         ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
1028     return dir_none;
1029 
1030   /* Find an identical reg_equiv or reg_equal note, which implies that the 2
1031      sets set dest to the same value.  */
1032   note1 = find_reg_equal_equiv_note (i1);
1033   note2 = find_reg_equal_equiv_note (i2);
1034 
1035   src1 = SET_SRC (s1);
1036   src2 = SET_SRC (s2);
1037 
1038   if (!values_equal_p (note1, note2, src1, src2))
1039     return dir_none;
1040 
1041   if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
1042     return dir_none;
1043 
1044   /* Although the 2 sets set dest to the same value, we cannot replace
1045        (set (dest) (const_int))
1046      by
1047        (set (dest) (reg))
1048      because we don't know if the reg is live and has the same value at the
1049      location of replacement.  */
1050   c1 = CONST_INT_P (src1);
1051   c2 = CONST_INT_P (src2);
1052   if (c1 && c2)
1053     return dir_both;
1054   else if (c2)
1055     return dir_forward;
1056   else if (c1)
1057     return dir_backward;
1058 
1059   return dir_none;
1060 }
1061 
1062 /* Merges directions A and B.  */
1063 
1064 static enum replace_direction
1065 merge_dir (enum replace_direction a, enum replace_direction b)
1066 {
1067   /* Implements the following table:
1068         |bo fw bw no
1069      ---+-----------
1070      bo |bo fw bw no
1071      fw |-- fw no no
1072      bw |-- -- bw no
1073      no |-- -- -- no.  */
1074 
1075   if (a == b)
1076     return a;
1077 
1078   if (a == dir_both)
1079     return b;
1080   if (b == dir_both)
1081     return a;
1082 
1083   return dir_none;
1084 }
1085 
1086 /* Array of flags indexed by reg note kind, true if the given
1087    reg note is CFA related.  */
1088 static const bool reg_note_cfa_p[] = {
1089 #undef REG_CFA_NOTE
1090 #define DEF_REG_NOTE(NAME) false,
1091 #define REG_CFA_NOTE(NAME) true,
1092 #include "reg-notes.def"
1093 #undef REG_CFA_NOTE
1094 #undef DEF_REG_NOTE
1095   false
1096 };
1097 
1098 /* Return true if I1 and I2 have identical CFA notes (the same order
1099    and equivalent content).  */
1100 
1101 static bool
1102 insns_have_identical_cfa_notes (rtx_insn *i1, rtx_insn *i2)
1103 {
1104   rtx n1, n2;
1105   for (n1 = REG_NOTES (i1), n2 = REG_NOTES (i2); ;
1106        n1 = XEXP (n1, 1), n2 = XEXP (n2, 1))
1107     {
1108       /* Skip over reg notes not related to CFI information.  */
1109       while (n1 && !reg_note_cfa_p[REG_NOTE_KIND (n1)])
1110 	n1 = XEXP (n1, 1);
1111       while (n2 && !reg_note_cfa_p[REG_NOTE_KIND (n2)])
1112 	n2 = XEXP (n2, 1);
1113       if (n1 == NULL_RTX && n2 == NULL_RTX)
1114 	return true;
1115       if (n1 == NULL_RTX || n2 == NULL_RTX)
1116 	return false;
1117       if (XEXP (n1, 0) == XEXP (n2, 0))
1118 	;
1119       else if (XEXP (n1, 0) == NULL_RTX || XEXP (n2, 0) == NULL_RTX)
1120 	return false;
1121       else if (!(reload_completed
1122 		 ? rtx_renumbered_equal_p (XEXP (n1, 0), XEXP (n2, 0))
1123 		 : rtx_equal_p (XEXP (n1, 0), XEXP (n2, 0))))
1124 	return false;
1125     }
1126 }
1127 
1128 /* Examine I1 and I2 and return:
1129    - dir_forward if I1 can be replaced by I2, or
1130    - dir_backward if I2 can be replaced by I1, or
1131    - dir_both if both are the case.  */
1132 
1133 static enum replace_direction
1134 old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx_insn *i1, rtx_insn *i2)
1135 {
1136   rtx p1, p2;
1137 
1138   /* Verify that I1 and I2 are equivalent.  */
1139   if (GET_CODE (i1) != GET_CODE (i2))
1140     return dir_none;
1141 
1142   /* __builtin_unreachable() may lead to empty blocks (ending with
1143      NOTE_INSN_BASIC_BLOCK).  They may be crossjumped. */
1144   if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
1145     return dir_both;
1146 
1147   /* ??? Do not allow cross-jumping between different stack levels.  */
1148   p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
1149   p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
1150   if (p1 && p2)
1151     {
1152       p1 = XEXP (p1, 0);
1153       p2 = XEXP (p2, 0);
1154       if (!rtx_equal_p (p1, p2))
1155         return dir_none;
1156 
1157       /* ??? Worse, this adjustment had better be constant lest we
1158          have differing incoming stack levels.  */
1159       if (!frame_pointer_needed
1160 	  && known_eq (find_args_size_adjust (i1), HOST_WIDE_INT_MIN))
1161 	return dir_none;
1162     }
1163   else if (p1 || p2)
1164     return dir_none;
1165 
1166   /* Do not allow cross-jumping between frame related insns and other
1167      insns.  */
1168   if (RTX_FRAME_RELATED_P (i1) != RTX_FRAME_RELATED_P (i2))
1169     return dir_none;
1170 
1171   p1 = PATTERN (i1);
1172   p2 = PATTERN (i2);
1173 
1174   if (GET_CODE (p1) != GET_CODE (p2))
1175     return dir_none;
1176 
1177   /* If this is a CALL_INSN, compare register usage information.
1178      If we don't check this on stack register machines, the two
1179      CALL_INSNs might be merged leaving reg-stack.c with mismatching
1180      numbers of stack registers in the same basic block.
1181      If we don't check this on machines with delay slots, a delay slot may
1182      be filled that clobbers a parameter expected by the subroutine.
1183 
1184      ??? We take the simple route for now and assume that if they're
1185      equal, they were constructed identically.
1186 
1187      Also check for identical exception regions.  */
1188 
1189   if (CALL_P (i1))
1190     {
1191       /* Ensure the same EH region.  */
1192       rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
1193       rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);
1194 
1195       if (!n1 && n2)
1196 	return dir_none;
1197 
1198       if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1199 	return dir_none;
1200 
1201       if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
1202 			CALL_INSN_FUNCTION_USAGE (i2))
1203 	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
1204 	return dir_none;
1205 
1206       /* For address sanitizer, never crossjump __asan_report_* builtins,
1207 	 otherwise errors might be reported on incorrect lines.  */
1208       if (flag_sanitize & SANITIZE_ADDRESS)
1209 	{
1210 	  rtx call = get_call_rtx_from (i1);
1211 	  if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
1212 	    {
1213 	      rtx symbol = XEXP (XEXP (call, 0), 0);
1214 	      if (SYMBOL_REF_DECL (symbol)
1215 		  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
1216 		{
1217 		  if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
1218 		       == BUILT_IN_NORMAL)
1219 		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
1220 			 >= BUILT_IN_ASAN_REPORT_LOAD1
1221 		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
1222 			 <= BUILT_IN_ASAN_STOREN)
1223 		    return dir_none;
1224 		}
1225 	    }
1226 	}
1227     }
1228 
1229   /* If both i1 and i2 are frame related, verify all the CFA notes
1230      in the same order and with the same content.  */
1231   if (RTX_FRAME_RELATED_P (i1) && !insns_have_identical_cfa_notes (i1, i2))
1232     return dir_none;
1233 
1234 #ifdef STACK_REGS
1235   /* If cross_jump_death_matters is not 0, the insn's mode
1236      indicates whether or not the insn contains any stack-like
1237      regs.  */
1238 
1239   if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1240     {
1241       /* If register stack conversion has already been done, then
1242 	 death notes must also be compared before it is certain that
1243 	 the two instruction streams match.  */
1244 
1245       rtx note;
1246       HARD_REG_SET i1_regset, i2_regset;
1247 
1248       CLEAR_HARD_REG_SET (i1_regset);
1249       CLEAR_HARD_REG_SET (i2_regset);
1250 
1251       for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1252 	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1253 	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1254 
1255       for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1256 	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1257 	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1258 
1259       if (!hard_reg_set_equal_p (i1_regset, i2_regset))
1260 	return dir_none;
1261     }
1262 #endif
1263 
1264   if (reload_completed
1265       ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1266     return dir_both;
1267 
1268   return can_replace_by (i1, i2);
1269 }
1270 
1271 /* When comparing insns I1 and I2 in flow_find_cross_jump or
1272    flow_find_head_matching_sequence, ensure the notes match.  */
1273 
1274 static void
1275 merge_notes (rtx_insn *i1, rtx_insn *i2)
1276 {
1277   /* If the merged insns have different REG_EQUAL notes, then
1278      remove them.  */
1279   rtx equiv1 = find_reg_equal_equiv_note (i1);
1280   rtx equiv2 = find_reg_equal_equiv_note (i2);
1281 
1282   if (equiv1 && !equiv2)
1283     remove_note (i1, equiv1);
1284   else if (!equiv1 && equiv2)
1285     remove_note (i2, equiv2);
1286   else if (equiv1 && equiv2
1287 	   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1288     {
1289       remove_note (i1, equiv1);
1290       remove_note (i2, equiv2);
1291     }
1292 }
1293 
1294  /* Walk from I1 in BB1 backward to the next non-debug insn, and return the
1295     resulting insn in I1 and the corresponding bb in BB1.  At the head of a
1296     bb, if there is a predecessor bb that reaches this bb via fallthru and
1297     FOLLOW_FALLTHRU is set, walk further into the predecessor bb and record
1298     this in DID_FALLTHRU.  Otherwise, stop at the head of the bb.  */
1299 
1300 static void
1301 walk_to_nondebug_insn (rtx_insn **i1, basic_block *bb1, bool follow_fallthru,
1302                        bool *did_fallthru)
1303 {
1304   edge fallthru;
1305 
1306   *did_fallthru = false;
1307 
1308   /* Ignore notes.  */
1309   while (!NONDEBUG_INSN_P (*i1))
1310     {
1311       if (*i1 != BB_HEAD (*bb1))
1312         {
1313           *i1 = PREV_INSN (*i1);
1314           continue;
1315         }
1316 
1317       if (!follow_fallthru)
1318         return;
1319 
1320       fallthru = find_fallthru_edge ((*bb1)->preds);
1321       if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1322           || !single_succ_p (fallthru->src))
1323         return;
1324 
1325       *bb1 = fallthru->src;
1326       *i1 = BB_END (*bb1);
1327       *did_fallthru = true;
1328      }
1329 }
1330 
1331 /* Look through the insns at the end of BB1 and BB2 and find the longest
1332    sequence of insns that are either equivalent or allow forward or backward
1333    replacement.  Store the first insns of that sequence in *F1 and *F2 and
1334    return the sequence length.
1335 
1336    DIR_P indicates the allowed replacement direction on function entry, and
1337    the actual replacement direction on function exit.  If NULL, only equivalent
1338    sequences are allowed.
1339 
1340    To simplify callers of this function, if the blocks match exactly,
1341    store the head of the blocks in *F1 and *F2.  */
1342 
1343 int
1344 flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx_insn **f1,
1345 		      rtx_insn **f2, enum replace_direction *dir_p)
1346 {
1347   rtx_insn *i1, *i2, *last1, *last2, *afterlast1, *afterlast2;
1348   int ninsns = 0;
1349   enum replace_direction dir, last_dir, afterlast_dir;
1350   bool follow_fallthru, did_fallthru;
1351 
1352   if (dir_p)
1353     dir = *dir_p;
1354   else
1355     dir = dir_both;
1356   afterlast_dir = dir;
1357   last_dir = afterlast_dir;
1358 
1359   /* Skip simple jumps at the end of the blocks.  Complex jumps still
1360      need to be compared for equivalence, which we'll do below.  */
1361 
1362   i1 = BB_END (bb1);
1363   last1 = afterlast1 = last2 = afterlast2 = NULL;
1364   if (onlyjump_p (i1)
1365       || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1366     {
1367       last1 = i1;
1368       i1 = PREV_INSN (i1);
1369     }
1370 
1371   i2 = BB_END (bb2);
1372   if (onlyjump_p (i2)
1373       || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1374     {
1375       last2 = i2;
1376       /* Count everything except for an unconditional jump as an insn.
1377 	 Don't count any jumps if dir_p is NULL.  */
1378       if (!simplejump_p (i2) && !returnjump_p (i2) && last1 && dir_p)
1379 	ninsns++;
1380       i2 = PREV_INSN (i2);
1381     }
1382 
1383   while (true)
1384     {
1385       /* In the following example, we can replace all jumps to C by jumps to A.
1386 
1387          This removes 4 duplicate insns.
1388          [bb A] insn1            [bb C] insn1
1389                 insn2                   insn2
1390          [bb B] insn3                   insn3
1391                 insn4                   insn4
1392                 jump_insn               jump_insn
1393 
1394          We could also replace all jumps to A by jumps to C, but that leaves B
1395          alive, and removes only 2 duplicate insns.  In a subsequent crossjump
1396          step, all jumps to B would be replaced with jumps to the middle of C,
1397          achieving the same result with more effort.
1398          So we allow only the first possibility, which means that we don't allow
1399          fallthru in the block that's being replaced.  */
1400 
1401       follow_fallthru = dir_p && dir != dir_forward;
1402       walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
1403       if (did_fallthru)
1404         dir = dir_backward;
1405 
1406       follow_fallthru = dir_p && dir != dir_backward;
1407       walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
1408       if (did_fallthru)
1409         dir = dir_forward;
1410 
1411       if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1412 	break;
1413 
1414       /* Do not turn a crossing edge into a non-crossing one, or vice
1415 	 versa, after reload.  */
1416       if (BB_PARTITION (BLOCK_FOR_INSN (i1))
1417 	  != BB_PARTITION (BLOCK_FOR_INSN (i2))
1418 	  && reload_completed)
1419 	break;
1420 
1421       dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
1422       if (dir == dir_none || (!dir_p && dir != dir_both))
1423 	break;
1424 
1425       merge_memattrs (i1, i2);
1426 
1427       /* Don't begin a cross-jump with a NOTE insn.  */
1428       if (INSN_P (i1))
1429 	{
1430 	  merge_notes (i1, i2);
1431 
1432 	  afterlast1 = last1, afterlast2 = last2;
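	  /* LAST1/LAST2 track the heads of the matched sequence so far;
	     AFTERLAST1/AFTERLAST2 remember the previous heads so the cc0
	     check below can back off by one insn.  */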
1433 	  last1 = i1, last2 = i2;
1434 	  afterlast_dir = last_dir;
1435 	  last_dir = dir;
1436 	  if (active_insn_p (i1))
1437 	    ninsns++;
1438 	}
1439 
1440       i1 = PREV_INSN (i1);
1441       i2 = PREV_INSN (i2);
1442     }
1443 
1444   /* Don't allow the insn after a compare to be shared by
1445      cross-jumping unless the compare is also shared.  */
1446   if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
1447       && ! sets_cc0_p (last1))
1448     last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;
1449 
1450   /* Include preceding notes and labels in the cross-jump.  One,
1451      this may bring us to the head of the blocks as requested above.
1452      Two, it keeps line number notes as matched as possible.  */
1453   if (ninsns)
1454     {
1455       bb1 = BLOCK_FOR_INSN (last1);
1456       while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
1457 	last1 = PREV_INSN (last1);
1458 
1459       if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1460 	last1 = PREV_INSN (last1);
1461 
1462       bb2 = BLOCK_FOR_INSN (last2);
1463       while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
1464 	last2 = PREV_INSN (last2);
1465 
1466       if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1467 	last2 = PREV_INSN (last2);
1468 
1469       *f1 = last1;
1470       *f2 = last2;
1471     }
1472 
1473   if (dir_p)
1474     *dir_p = last_dir;
1475   return ninsns;
1476 }
1477 
1478 /* Like flow_find_cross_jump, except start looking for a matching sequence from
1479    the head of the two blocks.  Do not include jumps at the end.
1480    If STOP_AFTER is nonzero, stop after finding that many matching
1481    instructions.  If STOP_AFTER is zero, count all INSN_P insns; if it is
1482    nonzero, count only active insns.  */
1483 
1484 int
1485 flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx_insn **f1,
1486 				  rtx_insn **f2, int stop_after)
1487 {
1488   rtx_insn *i1, *i2, *last1, *last2, *beforelast1, *beforelast2;
1489   int ninsns = 0;
1490   edge e;
1491   edge_iterator ei;
1492   int nehedges1 = 0, nehedges2 = 0;
1493 
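  /* Count the EH edges of each block up front; the check in the loop
     below uses these to avoid merging insns with different effects on
     exception handling.  */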
1494   FOR_EACH_EDGE (e, ei, bb1->succs)
1495     if (e->flags & EDGE_EH)
1496       nehedges1++;
1497   FOR_EACH_EDGE (e, ei, bb2->succs)
1498     if (e->flags & EDGE_EH)
1499       nehedges2++;
1500 
1501   i1 = BB_HEAD (bb1);
1502   i2 = BB_HEAD (bb2);
1503   last1 = beforelast1 = last2 = beforelast2 = NULL;
1504 
1505   while (true)
1506     {
1507       /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG.  */
1508       while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
1509 	{
1510 	  if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
1511 	    break;
1512 	  i1 = NEXT_INSN (i1);
1513 	}
1514 
1515       while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
1516 	{
1517 	  if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
1518 	    break;
1519 	  i2 = NEXT_INSN (i2);
1520 	}
1521 
1522       if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
1523 	  || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
1524 	break;
1525 
1526       if (NOTE_P (i1) || NOTE_P (i2)
1527 	  || JUMP_P (i1) || JUMP_P (i2))
1528 	break;
1529 
1530       /* A sanity check to make sure we're not merging insns with different
1531 	 effects on EH.  If only one of them ends a basic block, it shouldn't
1532 	 have an EH edge; if both end a basic block, there should be the same
1533 	 number of EH edges.  */
1534       if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
1535 	   && nehedges1 > 0)
1536 	  || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
1537 	      && nehedges2 > 0)
1538 	  || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
1539 	      && nehedges1 != nehedges2))
1540 	break;
1541 
1542       if (old_insns_match_p (0, i1, i2) != dir_both)
1543 	break;
1544 
1545       merge_memattrs (i1, i2);
1546 
1547       /* Don't begin a cross-jump with a NOTE insn.  */
1548       if (INSN_P (i1))
1549 	{
1550 	  merge_notes (i1, i2);
1551 
1552 	  beforelast1 = last1, beforelast2 = last2;
1553 	  last1 = i1, last2 = i2;
1554 	  if (!stop_after || active_insn_p (i1))
1555 	    ninsns++;
1556 	}
1557 
1558       if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
1559 	  || (stop_after > 0 && ninsns == stop_after))
1560 	break;
1561 
1562       i1 = NEXT_INSN (i1);
1563       i2 = NEXT_INSN (i2);
1564     }
1565 
1566   /* Don't allow a compare to be shared by cross-jumping unless the insn
1567      after the compare is also shared.  */
1568   if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
1569       && sets_cc0_p (last1))
1570     last1 = beforelast1, last2 = beforelast2, ninsns--;
1571 
1572   if (ninsns)
1573     {
1574       *f1 = last1;
1575       *f2 = last2;
1576     }
1577 
1578   return ninsns;
1579 }
1580 
1581 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1582    the branch instruction.  This means that if we commonize the control
1583    flow before the end of the basic block, the semantics remain unchanged.
1584 
1585    We may assume that there exists one edge with a common destination.  */
1586 
1587 static bool
1588 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1589 {
1590   int nehedges1 = 0, nehedges2 = 0;
1591   edge fallthru1 = 0, fallthru2 = 0;
1592   edge e1, e2;
1593   edge_iterator ei;
1594 
1595   /* If we performed shrink-wrapping, edges to the exit block can
1596      only be distinguished for JUMP_INSNs.  The two paths may differ in
1597      whether they went through the prologue.  Sibcalls are fine, we know
1598      that we either didn't need or inserted an epilogue before them.  */
1599   if (crtl->shrink_wrapped
1600       && single_succ_p (bb1)
1601       && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun)
1602       && (!JUMP_P (BB_END (bb1))
1603 	  /* Punt if the only successor is a fake edge to exit; the jump
1604 	     must be some weird one.  */
1605 	  || (single_succ_edge (bb1)->flags & EDGE_FAKE) != 0)
1606       && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
1607     return false;
1608 
1609   /* If BB1 has only one successor, we may be looking at either an
1610      unconditional jump, or a fake edge to exit.  */
1611   if (single_succ_p (bb1)
1612       && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1613       && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1614     return (single_succ_p (bb2)
1615 	    && (single_succ_edge (bb2)->flags
1616 		& (EDGE_COMPLEX | EDGE_FAKE)) == 0
1617 	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1618 
1619   /* Match conditional jumps - this may get tricky when fallthru and branch
1620      edges are crossed.  */
1621   if (EDGE_COUNT (bb1->succs) == 2
1622       && any_condjump_p (BB_END (bb1))
1623       && onlyjump_p (BB_END (bb1)))
1624     {
1625       edge b1, f1, b2, f2;
1626       bool reverse, match;
1627       rtx set1, set2, cond1, cond2;
1628       enum rtx_code code1, code2;
1629 
1630       if (EDGE_COUNT (bb2->succs) != 2
1631 	  || !any_condjump_p (BB_END (bb2))
1632 	  || !onlyjump_p (BB_END (bb2)))
1633 	return false;
1634 
1635       b1 = BRANCH_EDGE (bb1);
1636       b2 = BRANCH_EDGE (bb2);
1637       f1 = FALLTHRU_EDGE (bb1);
1638       f2 = FALLTHRU_EDGE (bb2);
1639 
1640       /* Get around possible forwarders on fallthru edges.  Other cases
1641 	 should be optimized out already.  */
1642       if (FORWARDER_BLOCK_P (f1->dest))
1643 	f1 = single_succ_edge (f1->dest);
1644 
1645       if (FORWARDER_BLOCK_P (f2->dest))
1646 	f2 = single_succ_edge (f2->dest);
1647 
1648       /* To simplify use of this function, return false if there are
1649 	 unneeded forwarder blocks.  These will get eliminated later
1650 	 during cleanup_cfg.  */
1651       if (FORWARDER_BLOCK_P (f1->dest)
1652 	  || FORWARDER_BLOCK_P (f2->dest)
1653 	  || FORWARDER_BLOCK_P (b1->dest)
1654 	  || FORWARDER_BLOCK_P (b2->dest))
1655 	return false;
1656 
1657       if (f1->dest == f2->dest && b1->dest == b2->dest)
1658 	reverse = false;
1659       else if (f1->dest == b2->dest && b1->dest == f2->dest)
1660 	reverse = true;
1661       else
1662 	return false;
1663 
1664       set1 = pc_set (BB_END (bb1));
1665       set2 = pc_set (BB_END (bb2));
1666       if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1667 	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1668 	reverse = !reverse;
1669 
1670       cond1 = XEXP (SET_SRC (set1), 0);
1671       cond2 = XEXP (SET_SRC (set2), 0);
1672       code1 = GET_CODE (cond1);
1673       if (reverse)
1674 	code2 = reversed_comparison_code (cond2, BB_END (bb2));
1675       else
1676 	code2 = GET_CODE (cond2);
1677 
1678       if (code2 == UNKNOWN)
1679 	return false;
1680 
1681       /* Verify codes and operands match.  */
1682       match = ((code1 == code2
1683 		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1684 		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1685 	       || (code1 == swap_condition (code2)
1686 		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
1687 					      XEXP (cond2, 0))
1688 		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
1689 					      XEXP (cond2, 1))));
1690 
1691       /* If we return true, we will join the blocks, which means that
1692 	 we will only have one branch prediction bit to work with.  Thus
1693 	 we require the existing branches to have probabilities that are
1694 	 roughly similar.  */
1695       if (match
1696 	  && optimize_bb_for_speed_p (bb1)
1697 	  && optimize_bb_for_speed_p (bb2))
1698 	{
1699 	  profile_probability prob2;
1700 
1701 	  if (b1->dest == b2->dest)
1702 	    prob2 = b2->probability;
1703 	  else
1704 	    /* Do not use f2 probability as f2 may be forwarded.  */
1705 	    prob2 = b2->probability.invert ();
1706 
1707 	  /* Fail if the difference in probabilities is greater than 50%.
1708 	     This rules out two well-predicted branches with opposite
1709 	     outcomes.  */
1710 	  if (b1->probability.differs_lot_from_p (prob2))
1711 	    {
1712 	      if (dump_file)
1713 		{
1714 		  fprintf (dump_file,
1715 			   "Outcomes of branch in bb %i and %i differ too"
1716 			   " much (", bb1->index, bb2->index);
1717 		  b1->probability.dump (dump_file);
1718 		  prob2.dump (dump_file);
1719 		  fprintf (dump_file, ")\n");
1720 		}
1721 	      return false;
1722 	    }
1723 	}
1724 
1725       if (dump_file && match)
1726 	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1727 		 bb1->index, bb2->index);
1728 
1729       return match;
1730     }
1731 
1732   /* Generic case - we are seeing a computed jump, table jump or trapping
1733      instruction.  */
1734 
1735   /* Check whether there are tablejumps at the end of BB1 and BB2.
1736      Return true if they are identical.  */
1737     {
1738       rtx_insn *label1, *label2;
1739       rtx_jump_table_data *table1, *table2;
1740 
1741       if (tablejump_p (BB_END (bb1), &label1, &table1)
1742 	  && tablejump_p (BB_END (bb2), &label2, &table2)
1743 	  && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1744 	{
1745 	  /* The labels should never be the same rtx.  If they really are the
1746 	     same, the jump tables are the same too.  So disable crossjumping of
1747 	     blocks BB1 and BB2 because when deleting the common insns at the end
1748 	     of BB1 by delete_basic_block () the jump table would be deleted too.  */
1749 	  /* If LABEL2 is referenced in BB1->END do not do anything
1750 	     because we would lose information when replacing
1751 	     LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
1752 	  if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1753 	    {
1754 	      /* Set IDENTICAL to true when the tables are identical.  */
1755 	      bool identical = false;
1756 	      rtx p1, p2;
1757 
1758 	      p1 = PATTERN (table1);
1759 	      p2 = PATTERN (table2);
1760 	      if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1761 		{
1762 		  identical = true;
1763 		}
1764 	      else if (GET_CODE (p1) == ADDR_DIFF_VEC
1765 		       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1766 		       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1767 		       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1768 		{
1769 		  int i;
1770 
1771 		  identical = true;
1772 		  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1773 		    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1774 		      identical = false;
1775 		}
1776 
1777 	      if (identical)
1778 		{
1779 		  bool match;
1780 
1781 		  /* Temporarily replace references to LABEL1 with LABEL2
1782 		     in BB1->END so that we could compare the instructions.  */
1783 		  replace_label_in_insn (BB_END (bb1), label1, label2, false);
1784 
1785 		  match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
1786 			   == dir_both);
1787 		  if (dump_file && match)
1788 		    fprintf (dump_file,
1789 			     "Tablejumps in bb %i and %i match.\n",
1790 			     bb1->index, bb2->index);
1791 
1792 		  /* Set the original label in BB1->END because when deleting
1793 		     a block whose end is a tablejump, the tablejump referenced
1794 		     from the instruction is deleted too.  */
1795 		  replace_label_in_insn (BB_END (bb1), label2, label1, false);
1796 
1797 		  return match;
1798 		}
1799 	    }
1800 	  return false;
1801 	}
1802     }
1803 
1804   /* Find the last non-debug non-note instruction in each bb, except
1805      stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p
1806      handles that case specially. old_insns_match_p does not handle
1807      other types of instruction notes.  */
1808   rtx_insn *last1 = BB_END (bb1);
1809   rtx_insn *last2 = BB_END (bb2);
1810   while (!NOTE_INSN_BASIC_BLOCK_P (last1)
1811 	 && (DEBUG_INSN_P (last1) || NOTE_P (last1)))
1812     last1 = PREV_INSN (last1);
1813   while (!NOTE_INSN_BASIC_BLOCK_P (last2)
1814 	 && (DEBUG_INSN_P (last2) || NOTE_P (last2)))
1815     last2 = PREV_INSN (last2);
1816   gcc_assert (last1 && last2);
1817 
1818   /* First ensure that the instructions match.  There may be many outgoing
1819      edges so this test is generally cheaper.  */
1820   if (old_insns_match_p (mode, last1, last2) != dir_both)
1821     return false;
1822 
1823   /* Search the outgoing edges, ensure that the counts do match, find possible
1824      fallthru and exception handling edges since these need more
1825      validation.  */
1826   if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1827     return false;
1828 
1829   bool nonfakeedges = false;
1830   FOR_EACH_EDGE (e1, ei, bb1->succs)
1831     {
1832       e2 = EDGE_SUCC (bb2, ei.index);
1833 
1834       if ((e1->flags & EDGE_FAKE) == 0)
1835 	nonfakeedges = true;
1836 
1837       if (e1->flags & EDGE_EH)
1838 	nehedges1++;
1839 
1840       if (e2->flags & EDGE_EH)
1841 	nehedges2++;
1842 
1843       if (e1->flags & EDGE_FALLTHRU)
1844 	fallthru1 = e1;
1845       if (e2->flags & EDGE_FALLTHRU)
1846 	fallthru2 = e2;
1847     }
1848 
1849   /* If number of edges of various types does not match, fail.  */
1850   if (nehedges1 != nehedges2
1851       || (fallthru1 != 0) != (fallthru2 != 0))
1852     return false;
1853 
1854   /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors,
1855      and the last real insn doesn't have a REG_ARGS_SIZE note, don't
1856      attempt to optimize, as the two basic blocks might have different
1857      REG_ARGS_SIZE depths.  For noreturn calls and unconditional
1858      traps there should be REG_ARGS_SIZE notes, though they could be
1859      missing for __builtin_unreachable () uses.  */
1860   if (!nonfakeedges
1861       && !ACCUMULATE_OUTGOING_ARGS
1862       && (!INSN_P (last1)
1863           || !find_reg_note (last1, REG_ARGS_SIZE, NULL)))
1864     return false;
1865 
1866   /* Fallthru edges must be forwarded to the same destination.  */
1867   if (fallthru1)
1868     {
1869       basic_block d1 = (forwarder_block_p (fallthru1->dest)
1870 			? single_succ (fallthru1->dest): fallthru1->dest);
1871       basic_block d2 = (forwarder_block_p (fallthru2->dest)
1872 			? single_succ (fallthru2->dest): fallthru2->dest);
1873 
1874       if (d1 != d2)
1875 	return false;
1876     }
1877 
1878   /* Ensure the same EH region.  */
1879   {
1880     rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1881     rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1882 
1883     if (!n1 && n2)
1884       return false;
1885 
1886     if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1887       return false;
1888   }
1889 
1890   /* The same checks as in try_crossjump_to_edge.  They are required for the
1891      RTL version of sequence abstraction.  */
1892   FOR_EACH_EDGE (e1, ei, bb2->succs)
1893     {
1894       edge e2;
1895       edge_iterator ei;
1896       basic_block d1 = e1->dest;
1897 
1898       if (FORWARDER_BLOCK_P (d1))
1899         d1 = EDGE_SUCC (d1, 0)->dest;
1900 
1901       FOR_EACH_EDGE (e2, ei, bb1->succs)
1902         {
1903           basic_block d2 = e2->dest;
1904           if (FORWARDER_BLOCK_P (d2))
1905             d2 = EDGE_SUCC (d2, 0)->dest;
1906           if (d1 == d2)
1907             break;
1908         }
1909 
1910       if (!e2)
1911         return false;
1912     }
1913 
1914   return true;
1915 }
1916 
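/* A rough source-level illustration of the conditional-jump matching above
   (an editorial sketch, not part of the original sources): blocks ending in

	if (a < b) goto L1; else goto L2;
   and
	if (b > a) goto L1; else goto L2;

   match directly, because the second condition is the first one with its
   operands swapped (swap_condition).  Blocks ending in

	if (a < b) goto L1; else goto L2;
   and
	if (a >= b) goto L2; else goto L1;

   match with REVERSE set, since their branch and fallthru destinations are
   crossed and the second comparison code is reversed before comparing
   (reversed_comparison_code).  */
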
1917 /* Return true if basic block BB has a preserve label.  */
1918 
1919 static bool
1920 block_has_preserve_label (basic_block bb)
1921 {
1922   return (bb
1923           && block_label (bb)
1924           && LABEL_PRESERVE_P (block_label (bb)));
1925 }
1926 
1927 /* E1 and E2 are edges with the same destination block.  Search their
1928    predecessors for common code.  If found, redirect control flow from
1929    (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
1930    or the other way around (dir_backward).  DIR specifies the allowed
1931    replacement direction.  */
1932 
1933 static bool
1934 try_crossjump_to_edge (int mode, edge e1, edge e2,
1935                        enum replace_direction dir)
1936 {
1937   int nmatch;
1938   basic_block src1 = e1->src, src2 = e2->src;
1939   basic_block redirect_to, redirect_from, to_remove;
1940   basic_block osrc1, osrc2, redirect_edges_to, tmp;
1941   rtx_insn *newpos1, *newpos2;
1942   edge s;
1943   edge_iterator ei;
1944 
1945   newpos1 = newpos2 = NULL;
1946 
1947   /* Search backward through forwarder blocks.  We don't need to worry
1948      about multiple entry or chained forwarders, as they will be optimized
1949      away.  We do this to look past the unconditional jump following a
1950      conditional jump that is required due to the current CFG shape.  */
1951   if (single_pred_p (src1)
1952       && FORWARDER_BLOCK_P (src1))
1953     e1 = single_pred_edge (src1), src1 = e1->src;
1954 
1955   if (single_pred_p (src2)
1956       && FORWARDER_BLOCK_P (src2))
1957     e2 = single_pred_edge (src2), src2 = e2->src;
1958 
1959   /* Nothing to do if we reach ENTRY, or a common source block.  */
1960   if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1961       || src2 == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1962     return false;
1963   if (src1 == src2)
1964     return false;
1965 
1966   /* Seeing more than one forwarder block would confuse us later...  */
1967   if (FORWARDER_BLOCK_P (e1->dest)
1968       && FORWARDER_BLOCK_P (single_succ (e1->dest)))
1969     return false;
1970 
1971   if (FORWARDER_BLOCK_P (e2->dest)
1972       && FORWARDER_BLOCK_P (single_succ (e2->dest)))
1973     return false;
1974 
1975   /* Likewise with dead code (possibly newly created by the other optimizations
1976      of cleanup_cfg).  */
1977   if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1978     return false;
1979 
1980   /* Do not turn a crossing edge to non-crossing or vice versa after reload.  */
1981   if (BB_PARTITION (src1) != BB_PARTITION (src2)
1982       && reload_completed)
1983     return false;
1984 
1985   /* Look for the common insn sequence, part the first ...  */
1986   if (!outgoing_edges_match (mode, src1, src2))
1987     return false;
1988 
1989   /* ... and part the second.  */
1990   nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);
1991 
1992   osrc1 = src1;
1993   osrc2 = src2;
1994   if (newpos1 != NULL_RTX)
1995     src1 = BLOCK_FOR_INSN (newpos1);
1996   if (newpos2 != NULL_RTX)
1997     src2 = BLOCK_FOR_INSN (newpos2);
1998 
1999   /* Check that SRC1 and SRC2 have preds again.  They may have changed
2000      above due to the call to flow_find_cross_jump.  */
2001   if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
2002     return false;
2003 
2004   if (dir == dir_backward)
2005     {
2006       std::swap (osrc1, osrc2);
2007       std::swap (src1, src2);
2008       std::swap (e1, e2);
2009       std::swap (newpos1, newpos2);
2010     }
2011 
2012   /* Don't proceed with the crossjump unless we found a sufficient number
2013      of matching instructions or the 'from' block was totally matched
2014      (such that its predecessors will hopefully be redirected and the
2015      block removed).  */
2016   if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
2017       && (newpos1 != BB_HEAD (src1)))
2018     return false;
2019 
2020   /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
2021   if (block_has_preserve_label (e1->dest)
2022       && (e1->flags & EDGE_ABNORMAL))
2023     return false;
2024 
2025   /* Here we know that the insns at the end of SRC1 which are common with SRC2
2026      will be deleted.
2027      If we have tablejumps in the end of SRC1 and SRC2
2028      they have been already compared for equivalence in outgoing_edges_match ()
2029      so replace the references to TABLE1 by references to TABLE2.  */
2030   {
2031       rtx_insn *label1, *label2;
2032       rtx_jump_table_data *table1, *table2;
2033 
2034       if (tablejump_p (BB_END (osrc1), &label1, &table1)
2035 	  && tablejump_p (BB_END (osrc2), &label2, &table2)
2036 	  && label1 != label2)
2037 	{
2038 	  rtx_insn *insn;
2039 
2040 	  /* Replace references to LABEL1 with LABEL2.  */
2041 	  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2042 	    {
2043 	      /* Do not replace the label in SRC1->END because when deleting
2044 		 a block whose end is a tablejump, the tablejump referenced
2045 		 from the instruction is deleted too.  */
2046 	      if (insn != BB_END (osrc1))
2047 		replace_label_in_insn (insn, label1, label2, true);
2048 	    }
2049 	}
2050   }
2051 
2052   /* Avoid splitting if possible.  We must always split when SRC2 has
2053      EH predecessor edges, or we may end up with basic blocks with both
2054      normal and EH predecessor edges.  */
2055   if (newpos2 == BB_HEAD (src2)
2056       && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
2057     redirect_to = src2;
2058   else
2059     {
2060       if (newpos2 == BB_HEAD (src2))
2061 	{
2062 	  /* Skip possible basic block header.  */
2063 	  if (LABEL_P (newpos2))
2064 	    newpos2 = NEXT_INSN (newpos2);
2065 	  while (DEBUG_INSN_P (newpos2))
2066 	    newpos2 = NEXT_INSN (newpos2);
2067 	  if (NOTE_P (newpos2))
2068 	    newpos2 = NEXT_INSN (newpos2);
2069 	  while (DEBUG_INSN_P (newpos2))
2070 	    newpos2 = NEXT_INSN (newpos2);
2071 	}
2072 
2073       if (dump_file)
2074 	fprintf (dump_file, "Splitting bb %i before %i insns\n",
2075 		 src2->index, nmatch);
2076       redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
2077     }
2078 
2079   if (dump_file)
2080     fprintf (dump_file,
2081 	     "Cross jumping from bb %i to bb %i; %i common insns\n",
2082 	     src1->index, src2->index, nmatch);
2083 
2084   /* We may have some registers visible through the block.  */
2085   df_set_bb_dirty (redirect_to);
2086 
2087   if (osrc2 == src2)
2088     redirect_edges_to = redirect_to;
2089   else
2090     redirect_edges_to = osrc2;
2091 
2092   /* Recompute the counts of destinations of outgoing edges.  */
2093   FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
2094     {
2095       edge s2;
2096       edge_iterator ei;
2097       basic_block d = s->dest;
2098 
2099       if (FORWARDER_BLOCK_P (d))
2100 	d = single_succ (d);
2101 
2102       FOR_EACH_EDGE (s2, ei, src1->succs)
2103 	{
2104 	  basic_block d2 = s2->dest;
2105 	  if (FORWARDER_BLOCK_P (d2))
2106 	    d2 = single_succ (d2);
2107 	  if (d == d2)
2108 	    break;
2109 	}
2110 
2111       /* Take care to update possible forwarder blocks.  We verified
2112 	 that there is no more than one in the chain, so we can't run
2113 	 into an infinite loop.  */
2114       if (FORWARDER_BLOCK_P (s->dest))
2115 	s->dest->count += s->count ();
2116 
2117       if (FORWARDER_BLOCK_P (s2->dest))
2118 	s2->dest->count -= s->count ();
2119 
2120       s->probability = s->probability.combine_with_count
2121 			  (redirect_edges_to->count,
2122 			   s2->probability, src1->count);
2123     }
2124 
2125   /* Adjust count for the block.  An earlier jump
2126      threading pass may have left the profile in an inconsistent
2127      state (see update_bb_profile_for_threading) so we must be
2128      prepared for overflows.  */
2129   tmp = redirect_to;
2130   do
2131     {
2132       tmp->count += src1->count;
2133       if (tmp == redirect_edges_to)
2134         break;
2135       tmp = find_fallthru_edge (tmp->succs)->dest;
2136     }
2137   while (true);
2138   update_br_prob_note (redirect_edges_to);
2139 
2140   /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */
2141 
2142   /* Skip possible basic block header.  */
2143   if (LABEL_P (newpos1))
2144     newpos1 = NEXT_INSN (newpos1);
2145 
2146   while (DEBUG_INSN_P (newpos1))
2147     newpos1 = NEXT_INSN (newpos1);
2148 
2149   if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
2150     newpos1 = NEXT_INSN (newpos1);
2151 
2152   while (DEBUG_INSN_P (newpos1))
2153     newpos1 = NEXT_INSN (newpos1);
2154 
2155   redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
2156   to_remove = single_succ (redirect_from);
2157 
2158   redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
2159   delete_basic_block (to_remove);
2160 
2161   update_forwarder_flag (redirect_from);
2162   if (redirect_to != src2)
2163     update_forwarder_flag (src2);
2164 
2165   return true;
2166 }
2167 
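/* A hedged sketch of the effect of try_crossjump_to_edge above (an
   editorial illustration; the matching really happens on RTL insns, not on
   source statements).  With two predecessors of D ending in the same
   computation,

	A: ...; tmp = f (x); goto D;
	B: ...; tmp = f (x); goto D;

   the common tail is kept only once, e.g. by splitting it out of B,

	A: ...; goto C;
	B: ...; goto C;
	C: tmp = f (x); goto D;

   and the other predecessor is redirected to the shared copy.  */
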
2168 /* Search the predecessors of BB for common insn sequences.  When found,
2169    share code between them by redirecting control flow.  Return true if
2170    any changes were made.  */
2171 
2172 static bool
2173 try_crossjump_bb (int mode, basic_block bb)
2174 {
2175   edge e, e2, fallthru;
2176   bool changed;
2177   unsigned max, ix, ix2;
2178 
2179   /* Nothing to do if there are not at least two incoming edges.  */
2180   if (EDGE_COUNT (bb->preds) < 2)
2181     return false;
2182 
2183   /* Don't crossjump if this block ends in a computed jump and we are
2184      optimizing for size.  */
2185   if (optimize_bb_for_size_p (bb)
2186       && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
2187       && computed_jump_p (BB_END (bb)))
2188     return false;
2189 
2190   /* If we are partitioning hot/cold basic blocks, we don't want to
2191      mess up unconditional or indirect jumps that cross between hot
2192      and cold sections.
2193 
2194      Basic block partitioning may result in some jumps that appear to
2195      be optimizable (or blocks that appear to be mergeable), but which really
2196      must be left untouched (they are required to make it safely across
2197      partition boundaries).  See the comments at the top of
2198      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
2199 
2200   if (BB_PARTITION (EDGE_PRED (bb, 0)->src)
2201       != BB_PARTITION (EDGE_PRED (bb, 1)->src)
2202       || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
2203     return false;
2204 
2205   /* It is always cheapest to redirect a block that ends in a branch to
2206      a block that falls through into BB, as that adds no branches to the
2207      program.  We'll try that combination first.  */
2208   fallthru = NULL;
2209   max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
2210 
2211   if (EDGE_COUNT (bb->preds) > max)
2212     return false;
2213 
2214   fallthru = find_fallthru_edge (bb->preds);
2215 
2216   changed = false;
2217   for (ix = 0; ix < EDGE_COUNT (bb->preds);)
2218     {
2219       e = EDGE_PRED (bb, ix);
2220       ix++;
2221 
2222       /* As noted above, first try with the fallthru predecessor (or a
2223 	 fallthru predecessor if we are in cfglayout mode).  */
2224       if (fallthru)
2225 	{
2226 	  /* Don't combine the fallthru edge into anything else.
2227 	     If there is a match, we'll do it the other way around.  */
2228 	  if (e == fallthru)
2229 	    continue;
2230 	  /* If nothing changed since the last attempt, there is nothing
2231 	     we can do.  */
2232 	  if (!first_pass
2233 	      && !((e->src->flags & BB_MODIFIED)
2234 		   || (fallthru->src->flags & BB_MODIFIED)))
2235 	    continue;
2236 
2237 	  if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))
2238 	    {
2239 	      changed = true;
2240 	      ix = 0;
2241 	      continue;
2242 	    }
2243 	}
2244 
2245       /* Non-obvious work limiting check: Recognize that we're going
2246 	 to call try_crossjump_bb on every basic block.  So if we have
2247 	 two blocks with lots of outgoing edges (a switch) and they
2248 	 share lots of common destinations, then we would do the
2249 	 cross-jump check once for each common destination.
2250 
2251 	 Now, if the blocks actually are cross-jump candidates, then
2252 	 all of their destinations will be shared, which means that
2253 	 we only need to check them for cross-jump candidacy once.  We
2254 	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
2255 	 choosing to do the check from the block for which the edge
2256 	 in question is the first successor of A.  */
2257       if (EDGE_SUCC (e->src, 0) != e)
2258 	continue;
2259 
2260       for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
2261 	{
2262 	  e2 = EDGE_PRED (bb, ix2);
2263 
2264 	  if (e2 == e)
2265 	    continue;
2266 
2267 	  /* We've already checked the fallthru edge above.  */
2268 	  if (e2 == fallthru)
2269 	    continue;
2270 
2271 	  /* The "first successor" check above only prevents multiple
2272 	     checks of crossjump(A,B).  In order to prevent redundant
2273 	     checks of crossjump(B,A), require that A be the block
2274 	     with the lowest index.  */
2275 	  if (e->src->index > e2->src->index)
2276 	    continue;
2277 
2278 	  /* If nothing changed since the last attempt, there is nothing
2279 	     we can do.  */
2280 	  if (!first_pass
2281 	      && !((e->src->flags & BB_MODIFIED)
2282 		   || (e2->src->flags & BB_MODIFIED)))
2283 	    continue;
2284 
2285 	  /* Neither e nor e2 is a fallthru edge, so we can crossjump in either
2286 	     direction.  */
2287 	  if (try_crossjump_to_edge (mode, e, e2, dir_both))
2288 	    {
2289 	      changed = true;
2290 	      ix = 0;
2291 	      break;
2292 	    }
2293 	}
2294     }
2295 
2296   if (changed)
2297     crossjumps_occurred = true;
2298 
2299   return changed;
2300 }
2301 
2302 /* Search the successors of BB for common insn sequences.  When found,
2303    share code between them by moving it across the basic block
2304    boundary.  Return true if any changes were made.  */
2305 
2306 static bool
2307 try_head_merge_bb (basic_block bb)
2308 {
2309   basic_block final_dest_bb = NULL;
2310   int max_match = INT_MAX;
2311   edge e0;
2312   rtx_insn **headptr, **currptr, **nextptr;
2313   bool changed, moveall;
2314   unsigned ix;
2315   rtx_insn *e0_last_head;
2316   rtx cond;
2317   rtx_insn *move_before;
2318   unsigned nedges = EDGE_COUNT (bb->succs);
2319   rtx_insn *jump = BB_END (bb);
2320   regset live, live_union;
2321 
2322   /* Nothing to do if there are not at least two outgoing edges.  */
2323   if (nedges < 2)
2324     return false;
2325 
2326   /* Don't crossjump if this block ends in a computed jump and we are
2327      optimizing for size.  */
2328   if (optimize_bb_for_size_p (bb)
2329       && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
2330       && computed_jump_p (BB_END (bb)))
2331     return false;
2332 
2333   cond = get_condition (jump, &move_before, true, false);
2334   if (cond == NULL_RTX)
2335     {
2336       if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2337 	move_before = prev_nonnote_nondebug_insn (jump);
2338       else
2339 	move_before = jump;
2340     }
2341 
2342   for (ix = 0; ix < nedges; ix++)
2343     if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
2344       return false;
2345 
2346   for (ix = 0; ix < nedges; ix++)
2347     {
2348       edge e = EDGE_SUCC (bb, ix);
2349       basic_block other_bb = e->dest;
2350 
2351       if (df_get_bb_dirty (other_bb))
2352 	{
2353 	  block_was_dirty = true;
2354 	  return false;
2355 	}
2356 
2357       if (e->flags & EDGE_ABNORMAL)
2358 	return false;
2359 
2360       /* Normally, all destination blocks must only be reachable from this
2361 	 block, i.e. they must have one incoming edge.
2362 
2363 	 There is one special case we can handle, that of multiple consecutive
2364 	 jumps where the first jumps to one of the targets of the second jump.
2365 	 This happens frequently in switch statements for default labels.
2366 	 The structure is as follows:
2367 	 FINAL_DEST_BB
2368 	 ....
2369 	 if (cond) jump A;
2370 	 fall through
2371 	 BB
2372 	 jump with targets A, B, C, D...
2373 	 A
2374 	 has two incoming edges, from FINAL_DEST_BB and BB
2375 
2376 	 In this case, we can try to move the insns through BB and into
2377 	 FINAL_DEST_BB.  */
2378       if (EDGE_COUNT (other_bb->preds) != 1)
2379 	{
2380 	  edge incoming_edge, incoming_bb_other_edge;
2381 	  edge_iterator ei;
2382 
2383 	  if (final_dest_bb != NULL
2384 	      || EDGE_COUNT (other_bb->preds) != 2)
2385 	    return false;
2386 
2387 	  /* We must be able to move the insns across the whole block.  */
2388 	  move_before = BB_HEAD (bb);
2389 	  while (!NONDEBUG_INSN_P (move_before))
2390 	    move_before = NEXT_INSN (move_before);
2391 
2392 	  if (EDGE_COUNT (bb->preds) != 1)
2393 	    return false;
2394 	  incoming_edge = EDGE_PRED (bb, 0);
2395 	  final_dest_bb = incoming_edge->src;
2396 	  if (EDGE_COUNT (final_dest_bb->succs) != 2)
2397 	    return false;
2398 	  FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
2399 	    if (incoming_bb_other_edge != incoming_edge)
2400 	      break;
2401 	  if (incoming_bb_other_edge->dest != other_bb)
2402 	    return false;
2403 	}
2404     }
2405 
2406   e0 = EDGE_SUCC (bb, 0);
2407   e0_last_head = NULL;
2408   changed = false;
2409 
2410   for (ix = 1; ix < nedges; ix++)
2411     {
2412       edge e = EDGE_SUCC (bb, ix);
2413       rtx_insn *e0_last, *e_last;
2414       int nmatch;
2415 
2416       nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
2417 						 &e0_last, &e_last, 0);
2418       if (nmatch == 0)
2419 	return false;
2420 
2421       if (nmatch < max_match)
2422 	{
2423 	  max_match = nmatch;
2424 	  e0_last_head = e0_last;
2425 	}
2426     }
2427 
2428   /* If we matched an entire block, we probably have to avoid moving the
2429      last insn.  */
2430   if (max_match > 0
2431       && e0_last_head == BB_END (e0->dest)
2432       && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
2433 	  || control_flow_insn_p (e0_last_head)))
2434     {
2435       max_match--;
2436       if (max_match == 0)
2437 	return false;
2438       e0_last_head = prev_real_nondebug_insn (e0_last_head);
2439     }
2440 
2441   if (max_match == 0)
2442     return false;
2443 
2444   /* We must find a union of the live registers at each of the end points.  */
2445   live = BITMAP_ALLOC (NULL);
2446   live_union = BITMAP_ALLOC (NULL);
2447 
2448   currptr = XNEWVEC (rtx_insn *, nedges);
2449   headptr = XNEWVEC (rtx_insn *, nedges);
2450   nextptr = XNEWVEC (rtx_insn *, nedges);
2451 
2452   for (ix = 0; ix < nedges; ix++)
2453     {
2454       int j;
2455       basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
2456       rtx_insn *head = BB_HEAD (merge_bb);
2457 
2458       while (!NONDEBUG_INSN_P (head))
2459 	head = NEXT_INSN (head);
2460       headptr[ix] = head;
2461       currptr[ix] = head;
2462 
2463       /* Compute the end point and live information.  */
2464       for (j = 1; j < max_match; j++)
2465 	do
2466 	  head = NEXT_INSN (head);
2467 	while (!NONDEBUG_INSN_P (head));
2468       simulate_backwards_to_point (merge_bb, live, head);
2469       IOR_REG_SET (live_union, live);
2470     }
2471 
2472   /* If we're moving across two blocks, verify the validity of the
2473      first move, then adjust the target and let the loop below deal
2474      with the final move.  */
2475   if (final_dest_bb != NULL)
2476     {
2477       rtx_insn *move_upto;
2478 
2479       moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
2480 				       jump, e0->dest, live_union,
2481 				       NULL, &move_upto);
2482       if (!moveall)
2483 	{
2484 	  if (move_upto == NULL_RTX)
2485 	    goto out;
2486 
2487 	  while (e0_last_head != move_upto)
2488 	    {
2489 	      df_simulate_one_insn_backwards (e0->dest, e0_last_head,
2490 					      live_union);
2491 	      e0_last_head = PREV_INSN (e0_last_head);
2492 	    }
2493 	}
2494       if (e0_last_head == NULL_RTX)
2495 	goto out;
2496 
2497       jump = BB_END (final_dest_bb);
2498       cond = get_condition (jump, &move_before, true, false);
2499       if (cond == NULL_RTX)
2500 	{
2501 	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2502 	    move_before = prev_nonnote_nondebug_insn (jump);
2503 	  else
2504 	    move_before = jump;
2505 	}
2506     }
2507 
2508   do
2509     {
2510       rtx_insn *move_upto;
2511       moveall = can_move_insns_across (currptr[0], e0_last_head,
2512 				       move_before, jump, e0->dest, live_union,
2513 				       NULL, &move_upto);
2514       if (!moveall && move_upto == NULL_RTX)
2515 	{
2516 	  if (jump == move_before)
2517 	    break;
2518 
2519 	  /* Try again, using a different insertion point.  */
2520 	  move_before = jump;
2521 
2522 	  /* Don't try moving before a cc0 user, as that may invalidate
2523 	     the cc0.  */
2524 	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2525 	    break;
2526 
2527 	  continue;
2528 	}
2529 
2530       if (final_dest_bb && !moveall)
2531 	/* We haven't checked whether a partial move would be OK for the first
2532 	   move, so we have to fail this case.  */
2533 	break;
2534 
2535       changed = true;
2536       for (;;)
2537 	{
2538 	  if (currptr[0] == move_upto)
2539 	    break;
2540 	  for (ix = 0; ix < nedges; ix++)
2541 	    {
2542 	      rtx_insn *curr = currptr[ix];
2543 	      do
2544 		curr = NEXT_INSN (curr);
2545 	      while (!NONDEBUG_INSN_P (curr));
2546 	      currptr[ix] = curr;
2547 	    }
2548 	}
2549 
2550       /* If we can't currently move all of the identical insns, remember
2551 	 each insn after the range that we'll merge.  */
2552       if (!moveall)
2553 	for (ix = 0; ix < nedges; ix++)
2554 	  {
2555 	    rtx_insn *curr = currptr[ix];
2556 	    do
2557 	      curr = NEXT_INSN (curr);
2558 	    while (!NONDEBUG_INSN_P (curr));
2559 	    nextptr[ix] = curr;
2560 	  }
2561 
2562       reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
2563       df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
2564       if (final_dest_bb != NULL)
2565 	df_set_bb_dirty (final_dest_bb);
2566       df_set_bb_dirty (bb);
2567       for (ix = 1; ix < nedges; ix++)
2568 	{
2569 	  df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
2570 	  delete_insn_chain (headptr[ix], currptr[ix], false);
2571 	}
2572       if (!moveall)
2573 	{
2574 	  if (jump == move_before)
2575 	    break;
2576 
2577 	  /* For the unmerged insns, try a different insertion point.  */
2578 	  move_before = jump;
2579 
2580 	  /* Don't try moving before a cc0 user, as that may invalidate
2581 	     the cc0.  */
2582 	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2583 	    break;
2584 
2585 	  for (ix = 0; ix < nedges; ix++)
2586 	    currptr[ix] = headptr[ix] = nextptr[ix];
2587 	}
2588     }
2589   while (!moveall);
2590 
2591  out:
2592   free (currptr);
2593   free (headptr);
2594   free (nextptr);
2595 
2596   crossjumps_occurred |= changed;
2597 
2598   return changed;
2599 }
2600 
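/* A hedged sketch of head merging as performed above (editorial
   illustration only).  When every successor of a conditional jump starts
   with the same insns,

	if (cond) goto B; else goto C;
	B: t = a + b; ...B-specific...
	C: t = a + b; ...C-specific...

   the common head is hoisted before the jump and deleted from the
   successors,

	t = a + b;
	if (cond) goto B; else goto C;
	B: ...B-specific...
	C: ...C-specific...

   subject to the liveness checks done via can_move_insns_across.  */
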
2601 /* Return true if BB contains just a bb note, or a bb note followed
2602    only by DEBUG_INSNs.  */
2603 
2604 static bool
2605 trivially_empty_bb_p (basic_block bb)
2606 {
2607   rtx_insn *insn = BB_END (bb);
2608 
2609   while (1)
2610     {
2611       if (insn == BB_HEAD (bb))
2612 	return true;
2613       if (!DEBUG_INSN_P (insn))
2614 	return false;
2615       insn = PREV_INSN (insn);
2616     }
2617 }
2618 
2619 /* Return true if BB contains just a return and possibly a USE of the
2620    return value.  Fill in *RET and *USE with the return and use insns
2621    if any are found, otherwise NULL.  All CLOBBERs are ignored.  */
2622 
2623 static bool
2624 bb_is_just_return (basic_block bb, rtx_insn **ret, rtx_insn **use)
2625 {
2626   *ret = *use = NULL;
2627   rtx_insn *insn;
2628 
2629   if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
2630     return false;
2631 
2632   FOR_BB_INSNS (bb, insn)
2633     if (NONDEBUG_INSN_P (insn))
2634       {
2635 	rtx pat = PATTERN (insn);
2636 
2637 	if (!*ret && ANY_RETURN_P (pat))
2638 	  *ret = insn;
2639 	else if (!*ret && !*use && GET_CODE (pat) == USE
2640 	    && REG_P (XEXP (pat, 0))
2641 	    && REG_FUNCTION_VALUE_P (XEXP (pat, 0)))
2642 	  *use = insn;
2643 	else if (GET_CODE (pat) != CLOBBER)
2644 	  return false;
2645       }
2646 
2647   return !!*ret;
2648 }
2649 
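/* A hedged illustration of how the "just return" test is used by
   try_optimize_cfg below (editorial sketch; the register shown is
   hypothetical).  A block recognized by bb_is_just_return looks roughly
   like

	(use (reg/i:SI 0 ax))		;; optional USE of the return value
	(simple_return)			;; or (return)

   possibly surrounded by CLOBBERs.  A jump (or conditional jump) to such a
   block can then be rewritten into a (conditional) return, with the USE
   copied in front of it, so the extra jump disappears.  */
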
2650 /* Do simple CFG optimizations - basic block merging, simplifying of jump
2651    instructions etc.  Return nonzero if changes were made.  */
2652 
2653 static bool
2654 try_optimize_cfg (int mode)
2655 {
2656   bool changed_overall = false;
2657   bool changed;
2658   int iterations = 0;
2659   basic_block bb, b, next;
2660 
2661   if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
2662     clear_bb_flags ();
2663 
2664   crossjumps_occurred = false;
2665 
2666   FOR_EACH_BB_FN (bb, cfun)
2667     update_forwarder_flag (bb);
2668 
2669   if (! targetm.cannot_modify_jumps_p ())
2670     {
2671       first_pass = true;
2672       /* Attempt to merge blocks as made possible by edge removal.  If
2673 	 a block has only one successor, and the successor has only
2674 	 one predecessor, they may be combined.  */
2675       do
2676 	{
2677 	  block_was_dirty = false;
2678 	  changed = false;
2679 	  iterations++;
2680 
2681 	  if (dump_file)
2682 	    fprintf (dump_file,
2683 		     "\n\ntry_optimize_cfg iteration %i\n\n",
2684 		     iterations);
2685 
2686 	  for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
2687 	       != EXIT_BLOCK_PTR_FOR_FN (cfun);)
2688 	    {
2689 	      basic_block c;
2690 	      edge s;
2691 	      bool changed_here = false;
2692 
2693 	      /* Delete trivially dead basic blocks.  These are either
2694 		 blocks with no predecessors, or empty blocks with no
2695 		 successors.  However, if the empty block with no
2696 		 successors is the successor of the ENTRY_BLOCK, it is
2697 		 kept.  This ensures that the ENTRY_BLOCK will have a
2698 		 successor which is a precondition for many RTL
2699 		 passes.  Empty blocks may result from expanding
2700 		 __builtin_unreachable ().  */
2701 	      if (EDGE_COUNT (b->preds) == 0
2702 		  || (EDGE_COUNT (b->succs) == 0
2703 		      && trivially_empty_bb_p (b)
2704 		      && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest
2705 		      != b))
2706 		{
2707 		  c = b->prev_bb;
2708 		  if (EDGE_COUNT (b->preds) > 0)
2709 		    {
2710 		      edge e;
2711 		      edge_iterator ei;
2712 
2713 		      if (current_ir_type () == IR_RTL_CFGLAYOUT)
2714 			{
2715 			  rtx_insn *insn;
2716 			  for (insn = BB_FOOTER (b);
2717 			       insn; insn = NEXT_INSN (insn))
2718 			    if (BARRIER_P (insn))
2719 			      break;
2720 			  if (insn)
2721 			    FOR_EACH_EDGE (e, ei, b->preds)
2722 			      if ((e->flags & EDGE_FALLTHRU))
2723 				{
2724 				  if (BB_FOOTER (b)
2725 				      && BB_FOOTER (e->src) == NULL)
2726 				    {
2727 				      BB_FOOTER (e->src) = BB_FOOTER (b);
2728 				      BB_FOOTER (b) = NULL;
2729 				    }
2730 				  else
2731 				    emit_barrier_after_bb (e->src);
2732 				}
2733 			}
2734 		      else
2735 			{
2736 			  rtx_insn *last = get_last_bb_insn (b);
2737 			  if (last && BARRIER_P (last))
2738 			    FOR_EACH_EDGE (e, ei, b->preds)
2739 			      if ((e->flags & EDGE_FALLTHRU))
2740 				emit_barrier_after (BB_END (e->src));
2741 			}
2742 		    }
2743 		  delete_basic_block (b);
2744 		  changed = true;
2745 		  /* Avoid trying to remove the entry block.  */
2746 		  b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c);
2747 		  continue;
2748 		}
2749 
2750 	      /* Remove code labels no longer used.  */
2751 	      if (single_pred_p (b)
2752 		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2753 		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
2754 		  && LABEL_P (BB_HEAD (b))
2755 		  && !LABEL_PRESERVE_P (BB_HEAD (b))
2756 		  /* If the previous block ends with a branch to this
2757 		     block, we can't delete the label.  Normally this
2758 		     is a condjump that is yet to be simplified, but
2759 		     if CASE_DROPS_THRU, this can be a tablejump with
2760 		     some element going to the same place as the
2761 		     default (fallthru).  */
2762 		  && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun)
2763 		      || !JUMP_P (BB_END (single_pred (b)))
2764 		      || ! label_is_jump_target_p (BB_HEAD (b),
2765 						   BB_END (single_pred (b)))))
2766 		{
2767 		  delete_insn (BB_HEAD (b));
2768 		  if (dump_file)
2769 		    fprintf (dump_file, "Deleted label in block %i.\n",
2770 			     b->index);
2771 		}
2772 
2773 	      /* If we fall through an empty block, we can remove it.  */
2774 	      if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL))
2775 		  && single_pred_p (b)
2776 		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2777 		  && !LABEL_P (BB_HEAD (b))
2778 		  && FORWARDER_BLOCK_P (b)
2779 		  /* Note that forwarder_block_p true ensures that
2780 		     there is a successor for this block.  */
2781 		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
2782 		  && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
2783 		{
2784 		  if (dump_file)
2785 		    fprintf (dump_file,
2786 			     "Deleting fallthru block %i.\n",
2787 			     b->index);
2788 
2789 		  c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2790 		       ? b->next_bb : b->prev_bb);
2791 		  redirect_edge_succ_nodup (single_pred_edge (b),
2792 					    single_succ (b));
2793 		  delete_basic_block (b);
2794 		  changed = true;
2795 		  b = c;
2796 		  continue;
2797 		}
2798 
2799 	      /* Merge B with its single successor, if any.  */
2800 	      if (single_succ_p (b)
2801 		  && (s = single_succ_edge (b))
2802 		  && !(s->flags & EDGE_COMPLEX)
2803 		  && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun)
2804 		  && single_pred_p (c)
2805 		  && b != c)
2806 		{
2807 		  /* When not in cfg_layout mode use code aware of reordering
2808 		     INSNs.  This code possibly creates new basic blocks, so it
2809 		     does not fit the merge_blocks interface and is kept here in
2810 		     the hope that it will become useless once more of the
2811 		     compiler is transformed to use cfg_layout mode.  */
2812 
2813 		  if ((mode & CLEANUP_CFGLAYOUT)
2814 		      && can_merge_blocks_p (b, c))
2815 		    {
2816 		      merge_blocks (b, c);
2817 		      update_forwarder_flag (b);
2818 		      changed_here = true;
2819 		    }
2820 		  else if (!(mode & CLEANUP_CFGLAYOUT)
2821 			   /* If the jump insn has side effects,
2822 			      we can't kill the edge.  */
2823 			   && (!JUMP_P (BB_END (b))
2824 			       || (reload_completed
2825 				   ? simplejump_p (BB_END (b))
2826 				   : (onlyjump_p (BB_END (b))
2827 				      && !tablejump_p (BB_END (b),
2828 						       NULL, NULL))))
2829 			   && (next = merge_blocks_move (s, b, c, mode)))
2830 		      {
2831 			b = next;
2832 			changed_here = true;
2833 		      }
2834 		}
2835 
2836 	      /* Try to change a branch to a return to just that return.  */
2837 	      rtx_insn *ret, *use;
2838 	      if (single_succ_p (b)
2839 		  && onlyjump_p (BB_END (b))
2840 		  && bb_is_just_return (single_succ (b), &ret, &use))
2841 		{
2842 		  if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2843 				     PATTERN (ret), 0))
2844 		    {
2845 		      if (use)
2846 			emit_insn_before (copy_insn (PATTERN (use)),
2847 					  BB_END (b));
2848 		      if (dump_file)
2849 			fprintf (dump_file, "Changed jump %d->%d to return.\n",
2850 				 b->index, single_succ (b)->index);
2851 		      redirect_edge_succ (single_succ_edge (b),
2852 					  EXIT_BLOCK_PTR_FOR_FN (cfun));
2853 		      single_succ_edge (b)->flags &= ~EDGE_CROSSING;
2854 		      changed_here = true;
2855 		    }
2856 		}
2857 
2858 	      /* Try to change a conditional branch to a return to the
2859 		 respective conditional return.  */
2860 	      if (EDGE_COUNT (b->succs) == 2
2861 		  && any_condjump_p (BB_END (b))
2862 		  && bb_is_just_return (BRANCH_EDGE (b)->dest, &ret, &use))
2863 		{
2864 		  if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2865 				     PATTERN (ret), 0))
2866 		    {
2867 		      if (use)
2868 			emit_insn_before (copy_insn (PATTERN (use)),
2869 					  BB_END (b));
2870 		      if (dump_file)
2871 			fprintf (dump_file, "Changed conditional jump %d->%d "
2872 				 "to conditional return.\n",
2873 				 b->index, BRANCH_EDGE (b)->dest->index);
2874 		      redirect_edge_succ (BRANCH_EDGE (b),
2875 					  EXIT_BLOCK_PTR_FOR_FN (cfun));
2876 		      BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
2877 		      changed_here = true;
2878 		    }
2879 		}
2880 
2881 	      /* Try to flip a conditional branch that falls through to
2882 		 a return so that it becomes a conditional return and a
2883 		 new jump to the original branch target.  */
2884 	      if (EDGE_COUNT (b->succs) == 2
2885 		  && BRANCH_EDGE (b)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
2886 		  && any_condjump_p (BB_END (b))
2887 		  && bb_is_just_return (FALLTHRU_EDGE (b)->dest, &ret, &use))
2888 		{
2889 		  if (invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2890 				   JUMP_LABEL (BB_END (b)), 0))
2891 		    {
2892 		      basic_block new_ft = BRANCH_EDGE (b)->dest;
2893 		      if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2894 					 PATTERN (ret), 0))
2895 			{
2896 			  if (use)
2897 			    emit_insn_before (copy_insn (PATTERN (use)),
2898 					      BB_END (b));
2899 			  if (dump_file)
2900 			    fprintf (dump_file, "Changed conditional jump "
2901 				     "%d->%d to conditional return, adding "
2902 				     "fall-through jump.\n",
2903 				     b->index, BRANCH_EDGE (b)->dest->index);
2904 			  redirect_edge_succ (BRANCH_EDGE (b),
2905 					      EXIT_BLOCK_PTR_FOR_FN (cfun));
2906 			  BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
2907 			  std::swap (BRANCH_EDGE (b)->probability,
2908 				     FALLTHRU_EDGE (b)->probability);
2909 			  update_br_prob_note (b);
2910 			  basic_block jb = force_nonfallthru (FALLTHRU_EDGE (b));
2911 			  notice_new_block (jb);
2912 			  if (!redirect_jump (as_a <rtx_jump_insn *> (BB_END (jb)),
2913 					      block_label (new_ft), 0))
2914 			    gcc_unreachable ();
2915 			  redirect_edge_succ (single_succ_edge (jb), new_ft);
2916 			  changed_here = true;
2917 			}
2918 		      else
2919 			{
2920 			  /* Invert the jump back to what it was.  This should
2921 			     never fail.  */
2922 			  if (!invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2923 					    JUMP_LABEL (BB_END (b)), 0))
2924 			    gcc_unreachable ();
2925 			}
2926 		    }
2927 		}
2928 
2929 	      /* Simplify branch over branch.  */
2930 	      if ((mode & CLEANUP_EXPENSIVE)
2931 		   && !(mode & CLEANUP_CFGLAYOUT)
2932 		   && try_simplify_condjump (b))
2933 		changed_here = true;
2934 
2935 	      /* If B has a single outgoing edge, but uses a
2936 		 non-trivial jump instruction without side-effects, we
2937 		 can either delete the jump entirely, or replace it
2938 		 with a simple unconditional jump.  */
2939 	      if (single_succ_p (b)
2940 		  && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
2941 		  && onlyjump_p (BB_END (b))
2942 		  && !CROSSING_JUMP_P (BB_END (b))
2943 		  && try_redirect_by_replacing_jump (single_succ_edge (b),
2944 						     single_succ (b),
2945 						     (mode & CLEANUP_CFGLAYOUT) != 0))
2946 		{
2947 		  update_forwarder_flag (b);
2948 		  changed_here = true;
2949 		}
2950 
2951 	      /* Simplify branch to branch.  */
2952 	      if (try_forward_edges (mode, b))
2953 		{
2954 		  update_forwarder_flag (b);
2955 		  changed_here = true;
2956 		}
2957 
2958 	      /* Look for shared code between blocks.  */
2959 	      if ((mode & CLEANUP_CROSSJUMP)
2960 		  && try_crossjump_bb (mode, b))
2961 		changed_here = true;
2962 
2963 	      if ((mode & CLEANUP_CROSSJUMP)
2964 		  /* This can lengthen register lifetimes.  Do it only after
2965 		     reload.  */
2966 		  && reload_completed
2967 		  && try_head_merge_bb (b))
2968 		changed_here = true;
2969 
2970 	      /* Don't get confused by the index shift caused by
2971 		 deleting blocks.  */
2972 	      if (!changed_here)
2973 		b = b->next_bb;
2974 	      else
2975 		changed = true;
2976 	    }
2977 
2978 	  if ((mode & CLEANUP_CROSSJUMP)
2979 	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
2980 	    changed = true;
2981 
2982 	  if (block_was_dirty)
2983 	    {
2984 	      /* This should only be set by head-merging.  */
2985 	      gcc_assert (mode & CLEANUP_CROSSJUMP);
2986 	      df_analyze ();
2987 	    }
2988 
2989 	  if (changed)
2990             {
2991               /* Edge forwarding in particular can cause hot blocks previously
2992                  reached by both hot and cold blocks to become dominated only
2993                  by cold blocks. This will cause the verification below to fail,
2994                  and lead to now cold code in the hot section. This is not easy
2995                  to detect and fix during edge forwarding, and in some cases
2996                  is only visible after newly unreachable blocks are deleted,
2997                  which will be done in fixup_partitions.  */
2998 	      if ((mode & CLEANUP_NO_PARTITIONING) == 0)
2999 		{
3000 		  fixup_partitions ();
3001 	          checking_verify_flow_info ();
3002 		}
3003             }
3004 
3005 	  changed_overall |= changed;
3006 	  first_pass = false;
3007 	}
3008       while (changed);
3009     }
3010 
3011   FOR_ALL_BB_FN (b, cfun)
3012     b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
3013 
3014   return changed_overall;
3015 }
3016 
3017 /* Delete all unreachable basic blocks.  */
3018 
3019 bool
3020 delete_unreachable_blocks (void)
3021 {
3022   bool changed = false;
3023   basic_block b, prev_bb;
3024 
3025   find_unreachable_blocks ();
3026 
3027   /* When we're in GIMPLE mode and there may be debug bind insns, we
3028      should delete blocks in reverse dominator order, so as to get a
3029      chance to substitute all released DEFs into debug bind stmts.  If
3030      we don't have dominators information, walking blocks backward
3031      gets us a better chance of retaining most debug information than
3032      otherwise.  */
3033   if (MAY_HAVE_DEBUG_BIND_INSNS && current_ir_type () == IR_GIMPLE
3034       && dom_info_available_p (CDI_DOMINATORS))
3035     {
3036       for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
3037 	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
3038 	{
3039 	  prev_bb = b->prev_bb;
3040 
3041 	  if (!(b->flags & BB_REACHABLE))
3042 	    {
3043 	      /* Speed up the removal of blocks that don't dominate
3044 		 others.  Walking backwards, this should be the common
3045 		 case.  */
3046 	      if (!first_dom_son (CDI_DOMINATORS, b))
3047 		delete_basic_block (b);
3048 	      else
3049 		{
3050 		  vec<basic_block> h
3051 		    = get_all_dominated_blocks (CDI_DOMINATORS, b);
3052 
3053 		  while (h.length ())
3054 		    {
3055 		      b = h.pop ();
3056 
3057 		      prev_bb = b->prev_bb;
3058 
3059 		      gcc_assert (!(b->flags & BB_REACHABLE));
3060 
3061 		      delete_basic_block (b);
3062 		    }
3063 
3064 		  h.release ();
3065 		}
3066 
3067 	      changed = true;
3068 	    }
3069 	}
3070     }
3071   else
3072     {
3073       for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
3074 	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
3075 	{
3076 	  prev_bb = b->prev_bb;
3077 
3078 	  if (!(b->flags & BB_REACHABLE))
3079 	    {
3080 	      delete_basic_block (b);
3081 	      changed = true;
3082 	    }
3083 	}
3084     }
3085 
3086   if (changed)
3087     tidy_fallthru_edges ();
3088   return changed;
3089 }
3090 
3091 /* Delete any jump tables never referenced.  We can't delete them at the
3092    time of removing the tablejump insn, as they are referenced by the
3093    preceding insns computing the destination, so we delay deleting them and
3094    garbage-collect them once life information is computed.  */
3095 void
3096 delete_dead_jumptables (void)
3097 {
3098   basic_block bb;
3099 
3100   /* A dead jump table does not belong to any basic block.  Scan insns
3101      between two adjacent basic blocks.  */
3102   FOR_EACH_BB_FN (bb, cfun)
3103     {
3104       rtx_insn *insn, *next;
3105 
3106       for (insn = NEXT_INSN (BB_END (bb));
3107 	   insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
3108 	   insn = next)
3109 	{
3110 	  next = NEXT_INSN (insn);
3111 	  if (LABEL_P (insn)
3112 	      && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
3113 	      && JUMP_TABLE_DATA_P (next))
3114 	    {
3115 	      rtx_insn *label = insn, *jump = next;
3116 
3117 	      if (dump_file)
3118 		fprintf (dump_file, "Dead jumptable %i removed\n",
3119 			 INSN_UID (insn));
3120 
3121 	      next = NEXT_INSN (next);
3122 	      delete_insn (jump);
3123 	      delete_insn (label);
3124 	    }
3125 	}
3126     }
3127 }
3128 
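/* A hedged example of what delete_dead_jumptables removes (editorial
   sketch of the insn stream between two blocks):

	(code_label 57 ...)			;; LABEL_NUSES is 0
	(jump_table_data 58 ...
	   (addr_vec [ (label_ref 23) ... ]))

   Once the tablejump that computed a destination from this table is gone,
   the label and the table no longer belong to any basic block and can be
   deleted together.  */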
3129 
3130 /* Tidy the CFG by deleting unreachable code and whatnot.  */
3131 
3132 bool
3133 cleanup_cfg (int mode)
3134 {
3135   bool changed = false;
3136 
3137   /* Set the cfglayout mode flag here.  We could update all the callers
3138      but that is just inconvenient, especially given that we eventually
3139      want to have cfglayout mode as the default.  */
3140   if (current_ir_type () == IR_RTL_CFGLAYOUT)
3141     mode |= CLEANUP_CFGLAYOUT;
3142 
3143   timevar_push (TV_CLEANUP_CFG);
3144   if (delete_unreachable_blocks ())
3145     {
3146       changed = true;
3147       /* We've possibly created trivially dead code.  Clean it up right
3148 	 now to introduce more opportunities for try_optimize_cfg.  */
3149       if (!(mode & (CLEANUP_NO_INSN_DEL))
3150 	  && !reload_completed)
3151 	delete_trivially_dead_insns (get_insns (), max_reg_num ());
3152     }
3153 
3154   compact_blocks ();
3155 
3156   /* To tail-merge blocks ending in the same noreturn function (e.g.
3157      a call to abort) we have to insert fake edges to exit.  Do this
3158      here once.  The fake edges do not interfere with any other CFG
3159      cleanups.  */
3160   if (mode & CLEANUP_CROSSJUMP)
3161     add_noreturn_fake_exit_edges ();
3162 
3163   if (!dbg_cnt (cfg_cleanup))
3164     return changed;
3165 
3166   while (try_optimize_cfg (mode))
3167     {
3168       delete_unreachable_blocks (), changed = true;
3169       if (!(mode & CLEANUP_NO_INSN_DEL))
3170 	{
3171 	  /* Try to remove some trivially dead insns when doing an expensive
3172 	     cleanup.  But delete_trivially_dead_insns doesn't work after
3173 	     reload (it only handles pseudos) and run_fast_dce is too costly
3174 	     to run in every iteration.
3175 
3176 	     For effective cross jumping, we really want to run a fast DCE to
3177 	     clean up any dead conditions, or they get in the way of performing
3178 	     useful tail merges.
3179 
3180 	     Other transformations in cleanup_cfg are not so sensitive to dead
3181 	     code, so delete_trivially_dead_insns or even doing nothing at all
3182 	     is good enough.  */
3183 	  if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
3184 	      && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
3185 	    break;
3186 	  if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occurred)
3187 	    run_fast_dce ();
3188 	}
3189       else
3190 	break;
3191     }
3192 
3193   if (mode & CLEANUP_CROSSJUMP)
3194     remove_fake_exit_edges ();
3195 
3196   /* Don't call delete_dead_jumptables in cfglayout mode, because
3197      that function assumes that jump tables are in the insns stream.
3198      But we also don't _have_ to delete dead jumptables in cfglayout
3199      mode because we shouldn't even be looking at things that are
3200      not in a basic block.  Dead jumptables are cleaned up when
3201      going out of cfglayout mode.  */
3202   if (!(mode & CLEANUP_CFGLAYOUT))
3203     delete_dead_jumptables ();
3204 
3205   /* ???  We probably do this way too often.  */
3206   if (current_loops
3207       && (changed
3208 	  || (mode & CLEANUP_CFG_CHANGED)))
3209     {
3210       timevar_push (TV_REPAIR_LOOPS);
3211       /* The above doesn't preserve dominance info if available.  */
3212       gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
3213       calculate_dominance_info (CDI_DOMINATORS);
3214       fix_loop_structure (NULL);
3215       free_dominance_info (CDI_DOMINATORS);
3216       timevar_pop (TV_REPAIR_LOOPS);
3217     }
3218 
3219   timevar_pop (TV_CLEANUP_CFG);
3220 
3221   return changed;
3222 }
3223 
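/* A hedged usage sketch of cleanup_cfg (mirroring the passes defined
   below; the flag names come from cfgcleanup.h):

     cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP);

   runs the expensive jump simplifications plus tail merging, while
   cleanup_cfg (0) performs only the basic cleanups such as unreachable
   block removal and block merging.  */
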
3224 namespace {
3225 
3226 const pass_data pass_data_jump =
3227 {
3228   RTL_PASS, /* type */
3229   "jump", /* name */
3230   OPTGROUP_NONE, /* optinfo_flags */
3231   TV_JUMP, /* tv_id */
3232   0, /* properties_required */
3233   0, /* properties_provided */
3234   0, /* properties_destroyed */
3235   0, /* todo_flags_start */
3236   0, /* todo_flags_finish */
3237 };
3238 
3239 class pass_jump : public rtl_opt_pass
3240 {
3241 public:
3242   pass_jump (gcc::context *ctxt)
3243     : rtl_opt_pass (pass_data_jump, ctxt)
3244   {}
3245 
3246   /* opt_pass methods: */
3247   virtual unsigned int execute (function *);
3248 
3249 }; // class pass_jump
3250 
3251 unsigned int
3252 pass_jump::execute (function *)
3253 {
3254   delete_trivially_dead_insns (get_insns (), max_reg_num ());
3255   if (dump_file)
3256     dump_flow_info (dump_file, dump_flags);
3257   cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
3258 	       | (flag_thread_jumps ? CLEANUP_THREADING : 0));
3259   return 0;
3260 }
3261 
3262 } // anon namespace
3263 
3264 rtl_opt_pass *
3265 make_pass_jump (gcc::context *ctxt)
3266 {
3267   return new pass_jump (ctxt);
3268 }
3269 
3270 namespace {
3271 
3272 const pass_data pass_data_postreload_jump =
3273 {
3274   RTL_PASS, /* type */
3275   "postreload_jump", /* name */
3276   OPTGROUP_NONE, /* optinfo_flags */
3277   TV_JUMP, /* tv_id */
3278   0, /* properties_required */
3279   0, /* properties_provided */
3280   0, /* properties_destroyed */
3281   0, /* todo_flags_start */
3282   0, /* todo_flags_finish */
3283 };
3284 
3285 class pass_postreload_jump : public rtl_opt_pass
3286 {
3287 public:
3288   pass_postreload_jump (gcc::context *ctxt)
3289     : rtl_opt_pass (pass_data_postreload_jump, ctxt)
3290   {}
3291 
3292   /* opt_pass methods: */
3293   virtual unsigned int execute (function *);
3294 
3295 }; // class pass_postreload_jump
3296 
3297 unsigned int
3298 pass_postreload_jump::execute (function *)
3299 {
3300   cleanup_cfg (flag_thread_jumps ? CLEANUP_THREADING : 0);
3301   return 0;
3302 }
3303 
3304 } // anon namespace
3305 
3306 rtl_opt_pass *
3307 make_pass_postreload_jump (gcc::context *ctxt)
3308 {
3309   return new pass_postreload_jump (ctxt);
3310 }
3311 
3312 namespace {
3313 
3314 const pass_data pass_data_jump2 =
3315 {
3316   RTL_PASS, /* type */
3317   "jump2", /* name */
3318   OPTGROUP_NONE, /* optinfo_flags */
3319   TV_JUMP, /* tv_id */
3320   0, /* properties_required */
3321   0, /* properties_provided */
3322   0, /* properties_destroyed */
3323   0, /* todo_flags_start */
3324   0, /* todo_flags_finish */
3325 };
3326 
3327 class pass_jump2 : public rtl_opt_pass
3328 {
3329 public:
3330   pass_jump2 (gcc::context *ctxt)
3331     : rtl_opt_pass (pass_data_jump2, ctxt)
3332   {}
3333 
3334   /* opt_pass methods: */
3335   virtual unsigned int execute (function *)
3336     {
3337       cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
3338       return 0;
3339     }
3340 
3341 }; // class pass_jump2
3342 
3343 } // anon namespace
3344 
3345 rtl_opt_pass *
3346 make_pass_jump2 (gcc::context *ctxt)
3347 {
3348   return new pass_jump2 (ctxt);
3349 }
3350