1 /* Control flow optimization code for GNU compiler.
2    Copyright (C) 1987-2016 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is redirected to the
     block's successor.  Simplification of the branch instruction is done
     by the underlying infrastructure, so the branch can be converted to a
     simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
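
/* For example, an RTL pass that wants the full set of cleanups might call
   (a sketch only; see cfgcleanup.h for the CLEANUP_* flag definitions):

     cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP | CLEANUP_THREADING);

   cleanup_cfg repeats try_optimize_cfg until no further changes occur.  */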
31 
32 #include "config.h"
33 #include "system.h"
34 #include "coretypes.h"
35 #include "backend.h"
36 #include "target.h"
37 #include "rtl.h"
38 #include "tree.h"
39 #include "cfghooks.h"
40 #include "df.h"
41 #include "tm_p.h"
42 #include "insn-config.h"
43 #include "emit-rtl.h"
44 #include "cselib.h"
45 #include "params.h"
46 #include "tree-pass.h"
47 #include "cfgloop.h"
48 #include "cfgrtl.h"
49 #include "cfganal.h"
50 #include "cfgbuild.h"
51 #include "cfgcleanup.h"
52 #include "dce.h"
53 #include "dbgcnt.h"
54 #include "rtl-iter.h"
55 
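/* A forwarder block is (roughly) an otherwise empty basic block that does
   nothing but transfer control to its single successor.  forwarder_block_p
   computes the property; the result is cached in the BB_FORWARDER_BLOCK
   flag so the optimization loops below can test it cheaply.  */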
56 #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
57 
58 /* Set to true when we are running first pass of try_optimize_cfg loop.  */
59 static bool first_pass;
60 
61 /* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
62 static bool crossjumps_occured;
63 
64 /* Set to true if we couldn't run an optimization due to stale liveness
65    information; we should run df_analyze to enable more opportunities.  */
66 static bool block_was_dirty;
67 
68 static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
69 static bool try_crossjump_bb (int, basic_block);
70 static bool outgoing_edges_match (int, basic_block, basic_block);
71 static enum replace_direction old_insns_match_p (int, rtx_insn *, rtx_insn *);
72 
73 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
74 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
75 static bool try_optimize_cfg (int);
76 static bool try_simplify_condjump (basic_block);
77 static bool try_forward_edges (int, basic_block);
78 static edge thread_jump (edge, basic_block);
79 static bool mark_effect (rtx, bitmap);
80 static void notice_new_block (basic_block);
81 static void update_forwarder_flag (basic_block);
82 static void merge_memattrs (rtx, rtx);
83 
84 /* Set flags for newly created block.  */
85 
86 static void
notice_new_block (basic_block bb)
88 {
89   if (!bb)
90     return;
91 
92   if (forwarder_block_p (bb))
93     bb->flags |= BB_FORWARDER_BLOCK;
94 }
95 
96 /* Recompute forwarder flag after block has been modified.  */
97 
98 static void
update_forwarder_flag (basic_block bb)
100 {
101   if (forwarder_block_p (bb))
102     bb->flags |= BB_FORWARDER_BLOCK;
103   else
104     bb->flags &= ~BB_FORWARDER_BLOCK;
105 }
106 
107 /* Simplify a conditional jump around an unconditional jump.
108    Return true if something changed.  */
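
/* For example, a sketch of the insn-level shape this handles:

       if (cond) goto L1;                   if (! cond) goto L2;
       goto L2;               becomes     L1:
     L1:                                     ...
       ...

   The forwarder block containing "goto L2" is deleted and the inverted
   conditional branch falls through to L1.  */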
109 
110 static bool
try_simplify_condjump (basic_block cbranch_block)
112 {
113   basic_block jump_block, jump_dest_block, cbranch_dest_block;
114   edge cbranch_jump_edge, cbranch_fallthru_edge;
115   rtx_insn *cbranch_insn;
116 
117   /* Verify that there are exactly two successors.  */
118   if (EDGE_COUNT (cbranch_block->succs) != 2)
119     return false;
120 
121   /* Verify that we've got a normal conditional branch at the end
122      of the block.  */
123   cbranch_insn = BB_END (cbranch_block);
124   if (!any_condjump_p (cbranch_insn))
125     return false;
126 
127   cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
128   cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
129 
130   /* The next block must not have multiple predecessors, must not
131      be the last block in the function, and must contain just the
132      unconditional jump.  */
133   jump_block = cbranch_fallthru_edge->dest;
134   if (!single_pred_p (jump_block)
135       || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
136       || !FORWARDER_BLOCK_P (jump_block))
137     return false;
138   jump_dest_block = single_succ (jump_block);
139 
140   /* If we are partitioning hot/cold basic blocks, we don't want to
141      mess up unconditional or indirect jumps that cross between hot
142      and cold sections.
143 
144      Basic block partitioning may result in some jumps that appear to
145      be optimizable (or blocks that appear to be mergeable), but which really
146      must be left untouched (they are required to make it safely across
147      partition boundaries).  See the comments at the top of
148      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
149 
150   if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
151       || (cbranch_jump_edge->flags & EDGE_CROSSING))
152     return false;
153 
154   /* The conditional branch must target the block after the
155      unconditional branch.  */
156   cbranch_dest_block = cbranch_jump_edge->dest;
157 
158   if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
159       || !can_fallthru (jump_block, cbranch_dest_block))
160     return false;
161 
162   /* Invert the conditional branch.  */
163   if (!invert_jump (as_a <rtx_jump_insn *> (cbranch_insn),
164 		    block_label (jump_dest_block), 0))
165     return false;
166 
167   if (dump_file)
168     fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
169 	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
170 
171   /* Success.  Update the CFG to match.  Note that after this point
172      the edge variable names appear backwards; the redirection is done
173      this way to preserve edge profile data.  */
174   cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
175 						cbranch_dest_block);
176   cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
177 						    jump_dest_block);
178   cbranch_jump_edge->flags |= EDGE_FALLTHRU;
179   cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
180   update_br_prob_note (cbranch_block);
181 
182   /* Delete the block with the unconditional jump, and clean up the mess.  */
183   delete_basic_block (jump_block);
184   tidy_fallthru_edge (cbranch_jump_edge);
185   update_forwarder_flag (cbranch_block);
186 
187   return true;
188 }
189 
/* Attempt to prove that the operation EXP is a NOOP using CSElib; otherwise
   mark its effect on the registers in NONEQUAL.  Return true if the effect
   cannot be modelled and the caller has to give up.  Used by jump
   threading.  */
192 
193 static bool
mark_effect (rtx exp, regset nonequal)
195 {
196   rtx dest;
197   switch (GET_CODE (exp))
198     {
      /* In case we do clobber the register, mark it as equal, as we know the
	 value is dead so it does not have to match.  */
201     case CLOBBER:
202       dest = XEXP (exp, 0);
203       if (REG_P (dest))
204 	bitmap_clear_range (nonequal, REGNO (dest), REG_NREGS (dest));
205       return false;
206 
207     case SET:
208       if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
209 	return false;
210       dest = SET_DEST (exp);
211       if (dest == pc_rtx)
212 	return false;
213       if (!REG_P (dest))
214 	return true;
215       bitmap_set_range (nonequal, REGNO (dest), REG_NREGS (dest));
216       return false;
217 
218     default:
219       return false;
220     }
221 }
222 
223 /* Return true if X contains a register in NONEQUAL.  */
224 static bool
mentions_nonequal_regs (const_rtx x, regset nonequal)
226 {
227   subrtx_iterator::array_type array;
228   FOR_EACH_SUBRTX (iter, array, x, NONCONST)
229     {
230       const_rtx x = *iter;
231       if (REG_P (x))
232 	{
233 	  unsigned int end_regno = END_REGNO (x);
234 	  for (unsigned int regno = REGNO (x); regno < end_regno; ++regno)
235 	    if (REGNO_REG_SET_P (nonequal, regno))
236 	      return true;
237 	}
238     }
239   return false;
240 }
241 
/* Attempt to prove that basic block B will have no side effects and will
   always continue along the same edge when reached via E.  Return that edge
   if it exists, NULL otherwise.  */
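
/* For example, a sketch of the situation this detects:

     E->src:  if (x == 0) goto B;   <- edge E is the branch edge to B
     B:       if (x != 0) goto C;   <- outcome is decided when reached via E

   Reached via E we know x == 0, so the branch in B is never taken and B's
   fallthru edge is returned; the caller can then redirect E past B,
   provided B itself has no side effects.  */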
245 
246 static edge
thread_jump (edge e, basic_block b)
248 {
249   rtx set1, set2, cond1, cond2;
250   rtx_insn *insn;
251   enum rtx_code code1, code2, reversed_code2;
252   bool reverse1 = false;
253   unsigned i;
254   regset nonequal;
255   bool failed = false;
256   reg_set_iterator rsi;
257 
258   if (b->flags & BB_NONTHREADABLE_BLOCK)
259     return NULL;
260 
261   /* At the moment, we do handle only conditional jumps, but later we may
262      want to extend this code to tablejumps and others.  */
263   if (EDGE_COUNT (e->src->succs) != 2)
264     return NULL;
265   if (EDGE_COUNT (b->succs) != 2)
266     {
267       b->flags |= BB_NONTHREADABLE_BLOCK;
268       return NULL;
269     }
270 
271   /* Second branch must end with onlyjump, as we will eliminate the jump.  */
272   if (!any_condjump_p (BB_END (e->src)))
273     return NULL;
274 
275   if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
276     {
277       b->flags |= BB_NONTHREADABLE_BLOCK;
278       return NULL;
279     }
280 
281   set1 = pc_set (BB_END (e->src));
282   set2 = pc_set (BB_END (b));
283   if (((e->flags & EDGE_FALLTHRU) != 0)
284       != (XEXP (SET_SRC (set1), 1) == pc_rtx))
285     reverse1 = true;
286 
287   cond1 = XEXP (SET_SRC (set1), 0);
288   cond2 = XEXP (SET_SRC (set2), 0);
289   if (reverse1)
290     code1 = reversed_comparison_code (cond1, BB_END (e->src));
291   else
292     code1 = GET_CODE (cond1);
293 
294   code2 = GET_CODE (cond2);
295   reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
296 
297   if (!comparison_dominates_p (code1, code2)
298       && !comparison_dominates_p (code1, reversed_code2))
299     return NULL;
300 
301   /* Ensure that the comparison operators are equivalent.
302      ??? This is far too pessimistic.  We should allow swapped operands,
303      different CCmodes, or for example comparisons for interval, that
304      dominate even when operands are not equivalent.  */
305   if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
306       || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
307     return NULL;
308 
309   /* Short circuit cases where block B contains some side effects, as we can't
310      safely bypass it.  */
311   for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
312        insn = NEXT_INSN (insn))
313     if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
314       {
315 	b->flags |= BB_NONTHREADABLE_BLOCK;
316 	return NULL;
317       }
318 
319   cselib_init (0);
320 
321   /* First process all values computed in the source basic block.  */
322   for (insn = NEXT_INSN (BB_HEAD (e->src));
323        insn != NEXT_INSN (BB_END (e->src));
324        insn = NEXT_INSN (insn))
325     if (INSN_P (insn))
326       cselib_process_insn (insn);
327 
328   nonequal = BITMAP_ALLOC (NULL);
329   CLEAR_REG_SET (nonequal);
330 
  /* Now assume that we've continued along the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */
334 
335   for (insn = NEXT_INSN (BB_HEAD (b));
336        insn != NEXT_INSN (BB_END (b)) && !failed;
337        insn = NEXT_INSN (insn))
338     {
339       if (INSN_P (insn))
340 	{
341 	  rtx pat = PATTERN (insn);
342 
343 	  if (GET_CODE (pat) == PARALLEL)
344 	    {
345 	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
346 		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
347 	    }
348 	  else
349 	    failed |= mark_effect (pat, nonequal);
350 	}
351 
352       cselib_process_insn (insn);
353     }
354 
  /* Later we should clear dead registers from NONEQUAL.  So far we don't
     have liveness information in cfg_cleanup.  */
357   if (failed)
358     {
359       b->flags |= BB_NONTHREADABLE_BLOCK;
360       goto failed_exit;
361     }
362 
  /* cond2 must not mention any register whose value is not known to be
     equal to its value in the former block.  */
365   if (mentions_nonequal_regs (cond2, nonequal))
366     goto failed_exit;
367 
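  /* The block is a NOOP only if no register remains marked as having
     changed; otherwise give up.  */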
368   EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
369     goto failed_exit;
370 
371   BITMAP_FREE (nonequal);
372   cselib_finish ();
373   if ((comparison_dominates_p (code1, code2) != 0)
374       != (XEXP (SET_SRC (set2), 1) == pc_rtx))
375     return BRANCH_EDGE (b);
376   else
377     return FALLTHRU_EDGE (b);
378 
379 failed_exit:
380   BITMAP_FREE (nonequal);
381   cselib_finish ();
382   return NULL;
383 }
384 
385 /* Attempt to forward edges leaving basic block B.
386    Return true if successful.  */
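
/* A sketch of the basic transformation: an edge pointing into a chain of
   forwarder blocks

       e: SRC -> fwd1 -> fwd2 -> DEST

   is redirected to point at DEST directly, and the execution counts and
   frequencies of the bypassed forwarder blocks are reduced accordingly.
   With CLEANUP_THREADING the chain may also be extended across conditional
   jumps whose outcome is known, via thread_jump.  */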
387 
388 static bool
try_forward_edges (int mode, basic_block b)
390 {
391   bool changed = false;
392   edge_iterator ei;
393   edge e, *threaded_edges = NULL;
394 
395   /* If we are partitioning hot/cold basic blocks, we don't want to
396      mess up unconditional or indirect jumps that cross between hot
397      and cold sections.
398 
399      Basic block partitioning may result in some jumps that appear to
400      be optimizable (or blocks that appear to be mergeable), but which really
401      must be left untouched (they are required to make it safely across
402      partition boundaries).  See the comments at the top of
403      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
404 
405   if (JUMP_P (BB_END (b)) && CROSSING_JUMP_P (BB_END (b)))
406     return false;
407 
408   for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
409     {
410       basic_block target, first;
411       location_t goto_locus;
412       int counter;
413       bool threaded = false;
414       int nthreaded_edges = 0;
415       bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;
416 
      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we may succeed in forwarding the
	 fallthru edge to the same place as the branch edge of a conditional
	 branch and turn the conditional branch into an unconditional one.  */
422       if (e->flags & EDGE_COMPLEX)
423 	{
424 	  ei_next (&ei);
425 	  continue;
426 	}
427 
428       target = first = e->dest;
429       counter = NUM_FIXED_BLOCKS;
430       goto_locus = e->goto_locus;
431 
432       /* If we are partitioning hot/cold basic_blocks, we don't want to mess
433 	 up jumps that cross between hot/cold sections.
434 
435 	 Basic block partitioning may result in some jumps that appear
436 	 to be optimizable (or blocks that appear to be mergeable), but which
437 	 really must be left untouched (they are required to make it safely
438 	 across partition boundaries).  See the comments at the top of
439 	 bb-reorder.c:partition_hot_cold_basic_blocks for complete
440 	 details.  */
441 
442       if (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
443 	  && JUMP_P (BB_END (first))
444 	  && CROSSING_JUMP_P (BB_END (first)))
445 	return changed;
446 
447       while (counter < n_basic_blocks_for_fn (cfun))
448 	{
449 	  basic_block new_target = NULL;
450 	  bool new_target_threaded = false;
451 	  may_thread |= (target->flags & BB_MODIFIED) != 0;
452 
453 	  if (FORWARDER_BLOCK_P (target)
454 	      && !(single_succ_edge (target)->flags & EDGE_CROSSING)
455 	      && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun))
456 	    {
457 	      /* Bypass trivial infinite loops.  */
458 	      new_target = single_succ (target);
459 	      if (target == new_target)
460 		counter = n_basic_blocks_for_fn (cfun);
461 	      else if (!optimize)
462 		{
463 		  /* When not optimizing, ensure that edges or forwarder
464 		     blocks with different locus are not optimized out.  */
465 		  location_t new_locus = single_succ_edge (target)->goto_locus;
466 		  location_t locus = goto_locus;
467 
468 		  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
469 		      && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
470 		      && new_locus != locus)
471 		    new_target = NULL;
472 		  else
473 		    {
474 		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
475 			locus = new_locus;
476 
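		      /* The forwarder block itself may also carry a
			 location on its final non-debug insn; check it the
			 same way as the edge's goto_locus above.  */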
477 		      rtx_insn *last = BB_END (target);
478 		      if (DEBUG_INSN_P (last))
479 			last = prev_nondebug_insn (last);
480 		      if (last && INSN_P (last))
481 			new_locus = INSN_LOCATION (last);
482 		      else
483 			new_locus = UNKNOWN_LOCATION;
484 
485 		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
486 			  && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
487 			  && new_locus != locus)
488 			new_target = NULL;
489 		      else
490 			{
491 			  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
492 			    locus = new_locus;
493 
494 			  goto_locus = locus;
495 			}
496 		    }
497 		}
498 	    }
499 
	  /* Thread over only one edge at a time to simplify updating
	     of probabilities.  */
502 	  else if ((mode & CLEANUP_THREADING) && may_thread)
503 	    {
504 	      edge t = thread_jump (e, target);
505 	      if (t)
506 		{
507 		  if (!threaded_edges)
508 		    threaded_edges = XNEWVEC (edge,
509 					      n_basic_blocks_for_fn (cfun));
510 		  else
511 		    {
512 		      int i;
513 
514 		      /* Detect an infinite loop across blocks not
515 			 including the start block.  */
516 		      for (i = 0; i < nthreaded_edges; ++i)
517 			if (threaded_edges[i] == t)
518 			  break;
519 		      if (i < nthreaded_edges)
520 			{
521 			  counter = n_basic_blocks_for_fn (cfun);
522 			  break;
523 			}
524 		    }
525 
526 		  /* Detect an infinite loop across the start block.  */
527 		  if (t->dest == b)
528 		    break;
529 
530 		  gcc_assert (nthreaded_edges
531 			      < (n_basic_blocks_for_fn (cfun)
532 				 - NUM_FIXED_BLOCKS));
533 		  threaded_edges[nthreaded_edges++] = t;
534 
535 		  new_target = t->dest;
536 		  new_target_threaded = true;
537 		}
538 	    }
539 
540 	  if (!new_target)
541 	    break;
542 
543 	  counter++;
544 	  target = new_target;
545 	  threaded |= new_target_threaded;
546 	}
547 
548       if (counter >= n_basic_blocks_for_fn (cfun))
549 	{
550 	  if (dump_file)
551 	    fprintf (dump_file, "Infinite loop in BB %i.\n",
552 		     target->index);
553 	}
554       else if (target == first)
555 	; /* We didn't do anything.  */
556       else
557 	{
558 	  /* Save the values now, as the edge may get removed.  */
559 	  gcov_type edge_count = e->count;
560 	  int edge_probability = e->probability;
561 	  int edge_frequency;
562 	  int n = 0;
563 
564 	  e->goto_locus = goto_locus;
565 
566 	  /* Don't force if target is exit block.  */
567 	  if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun))
568 	    {
569 	      notice_new_block (redirect_edge_and_branch_force (e, target));
570 	      if (dump_file)
571 		fprintf (dump_file, "Conditionals threaded.\n");
572 	    }
573 	  else if (!redirect_edge_and_branch (e, target))
574 	    {
575 	      if (dump_file)
576 		fprintf (dump_file,
577 			 "Forwarding edge %i->%i to %i failed.\n",
578 			 b->index, e->dest->index, target->index);
579 	      ei_next (&ei);
580 	      continue;
581 	    }
582 
583 	  /* We successfully forwarded the edge.  Now update profile
584 	     data: for each edge we traversed in the chain, remove
585 	     the original edge's execution count.  */
586 	  edge_frequency = apply_probability (b->frequency, edge_probability);
587 
588 	  do
589 	    {
590 	      edge t;
591 
592 	      if (!single_succ_p (first))
593 		{
594 		  gcc_assert (n < nthreaded_edges);
595 		  t = threaded_edges [n++];
596 		  gcc_assert (t->src == first);
597 		  update_bb_profile_for_threading (first, edge_frequency,
598 						   edge_count, t);
599 		  update_br_prob_note (first);
600 		}
601 	      else
602 		{
603 		  first->count -= edge_count;
604 		  if (first->count < 0)
605 		    first->count = 0;
606 		  first->frequency -= edge_frequency;
607 		  if (first->frequency < 0)
608 		    first->frequency = 0;
609 		  /* It is possible that as the result of
610 		     threading we've removed edge as it is
611 		     threaded to the fallthru edge.  Avoid
612 		     getting out of sync.  */
613 		  if (n < nthreaded_edges
614 		      && first == threaded_edges [n]->src)
615 		    n++;
616 		  t = single_succ_edge (first);
617 		}
618 
619 	      t->count -= edge_count;
620 	      if (t->count < 0)
621 		t->count = 0;
622 	      first = t->dest;
623 	    }
624 	  while (first != target);
625 
626 	  changed = true;
627 	  continue;
628 	}
629       ei_next (&ei);
630     }
631 
632   free (threaded_edges);
633   return changed;
634 }
635 
636 
637 /* Blocks A and B are to be merged into a single block.  A has no incoming
638    fallthru edge, so it can be moved before B without adding or modifying
639    any jumps (aside from the jump from A to B).  */
640 
641 static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
643 {
644   rtx_insn *barrier;
645 
646   /* If we are partitioning hot/cold basic blocks, we don't want to
647      mess up unconditional or indirect jumps that cross between hot
648      and cold sections.
649 
650      Basic block partitioning may result in some jumps that appear to
651      be optimizable (or blocks that appear to be mergeable), but which really
652      must be left untouched (they are required to make it safely across
653      partition boundaries).  See the comments at the top of
654      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
655 
656   if (BB_PARTITION (a) != BB_PARTITION (b))
657     return;
658 
659   barrier = next_nonnote_insn (BB_END (a));
660   gcc_assert (BARRIER_P (barrier));
661   delete_insn (barrier);
662 
663   /* Scramble the insn chain.  */
664   if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
665     reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
666   df_set_bb_dirty (a);
667 
668   if (dump_file)
669     fprintf (dump_file, "Moved block %d before %d and merged.\n",
670 	     a->index, b->index);
671 
672   /* Swap the records for the two blocks around.  */
673 
674   unlink_block (a);
675   link_block (a, b->prev_bb);
676 
677   /* Now blocks A and B are contiguous.  Merge them.  */
678   merge_blocks (a, b);
679 }
680 
681 /* Blocks A and B are to be merged into a single block.  B has no outgoing
682    fallthru edge, so it can be moved after A without adding or modifying
683    any jumps (aside from the jump from A to B).  */
684 
685 static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
687 {
688   rtx_insn *barrier, *real_b_end;
689   rtx label;
690   rtx_jump_table_data *table;
691 
692   /* If we are partitioning hot/cold basic blocks, we don't want to
693      mess up unconditional or indirect jumps that cross between hot
694      and cold sections.
695 
696      Basic block partitioning may result in some jumps that appear to
697      be optimizable (or blocks that appear to be mergeable), but which really
698      must be left untouched (they are required to make it safely across
699      partition boundaries).  See the comments at the top of
700      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
701 
702   if (BB_PARTITION (a) != BB_PARTITION (b))
703     return;
704 
705   real_b_end = BB_END (b);
706 
707   /* If there is a jump table following block B temporarily add the jump table
708      to block B so that it will also be moved to the correct location.  */
709   if (tablejump_p (BB_END (b), &label, &table)
710       && prev_active_insn (label) == BB_END (b))
711     {
712       BB_END (b) = table;
713     }
714 
715   /* There had better have been a barrier there.  Delete it.  */
716   barrier = NEXT_INSN (BB_END (b));
717   if (barrier && BARRIER_P (barrier))
718     delete_insn (barrier);
719 
720 
721   /* Scramble the insn chain.  */
722   reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
723 
724   /* Restore the real end of b.  */
725   BB_END (b) = real_b_end;
726 
727   if (dump_file)
728     fprintf (dump_file, "Moved block %d after %d and merged.\n",
729 	     b->index, a->index);
730 
731   /* Now blocks A and B are contiguous.  Merge them.  */
732   merge_blocks (a, b);
733 }
734 
/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so that cleanup_cfg
   does not need to iterate.

   It might be a good idea to return the basic block before C in the case
   where C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */
746 
747 static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
749 {
750   basic_block next;
751 
752   /* If we are partitioning hot/cold basic blocks, we don't want to
753      mess up unconditional or indirect jumps that cross between hot
754      and cold sections.
755 
756      Basic block partitioning may result in some jumps that appear to
757      be optimizable (or blocks that appear to be mergeable), but which really
758      must be left untouched (they are required to make it safely across
759      partition boundaries).  See the comments at the top of
760      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
761 
762   if (BB_PARTITION (b) != BB_PARTITION (c))
763     return NULL;
764 
765   /* If B has a fallthru edge to C, no need to move anything.  */
766   if (e->flags & EDGE_FALLTHRU)
767     {
768       int b_index = b->index, c_index = c->index;
769 
770       /* Protect the loop latches.  */
771       if (current_loops && c->loop_father->latch == c)
772 	return NULL;
773 
774       merge_blocks (b, c);
775       update_forwarder_flag (b);
776 
777       if (dump_file)
778 	fprintf (dump_file, "Merged %d and %d without moving.\n",
779 		 b_index, c_index);
780 
781       return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb;
782     }
783 
784   /* Otherwise we will need to move code around.  Do that only if expensive
785      transformations are allowed.  */
786   else if (mode & CLEANUP_EXPENSIVE)
787     {
788       edge tmp_edge, b_fallthru_edge;
789       bool c_has_outgoing_fallthru;
790       bool b_has_incoming_fallthru;
791 
792       /* Avoid overactive code motion, as the forwarder blocks should be
793 	 eliminated by edge redirection instead.  One exception might have
794 	 been if B is a forwarder block and C has no fallthru edge, but
795 	 that should be cleaned up by bb-reorder instead.  */
796       if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
797 	return NULL;
798 
799       /* We must make sure to not munge nesting of lexical blocks,
800 	 and loop notes.  This is done by squeezing out all the notes
801 	 and leaving them there to lie.  Not ideal, but functional.  */
802 
803       tmp_edge = find_fallthru_edge (c->succs);
804       c_has_outgoing_fallthru = (tmp_edge != NULL);
805 
806       tmp_edge = find_fallthru_edge (b->preds);
807       b_has_incoming_fallthru = (tmp_edge != NULL);
808       b_fallthru_edge = tmp_edge;
809       next = b->prev_bb;
810       if (next == c)
811 	next = next->prev_bb;
812 
813       /* Otherwise, we're going to try to move C after B.  If C does
814 	 not have an outgoing fallthru, then it can be moved
815 	 immediately after B without introducing or modifying jumps.  */
816       if (! c_has_outgoing_fallthru)
817 	{
818 	  merge_blocks_move_successor_nojumps (b, c);
819 	  return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
820 	}
821 
822       /* If B does not have an incoming fallthru, then it can be moved
823 	 immediately before C without introducing or modifying jumps.
824 	 C cannot be the first block, so we do not have to worry about
825 	 accessing a non-existent block.  */
826 
827       if (b_has_incoming_fallthru)
828 	{
829 	  basic_block bb;
830 
831 	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
832 	    return NULL;
833 	  bb = force_nonfallthru (b_fallthru_edge);
834 	  if (bb)
835 	    notice_new_block (bb);
836 	}
837 
838       merge_blocks_move_predecessor_nojumps (b, c);
839       return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
840     }
841 
842   return NULL;
843 }
844 
845 
/* Remove from MEM expressions X and Y any memory attributes that are not
   equal, weakening the remaining ones to values valid for both.  */
848 
849 static void
merge_memattrs (rtx x, rtx y)
851 {
852   int i;
853   int j;
854   enum rtx_code code;
855   const char *fmt;
856 
857   if (x == y)
858     return;
859   if (x == 0 || y == 0)
860     return;
861 
862   code = GET_CODE (x);
863 
864   if (code != GET_CODE (y))
865     return;
866 
867   if (GET_MODE (x) != GET_MODE (y))
868     return;
869 
870   if (code == MEM && !mem_attrs_eq_p (MEM_ATTRS (x), MEM_ATTRS (y)))
871     {
872       if (! MEM_ATTRS (x))
873 	MEM_ATTRS (y) = 0;
874       else if (! MEM_ATTRS (y))
875 	MEM_ATTRS (x) = 0;
876       else
877 	{
878 	  HOST_WIDE_INT mem_size;
879 
880 	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
881 	    {
882 	      set_mem_alias_set (x, 0);
883 	      set_mem_alias_set (y, 0);
884 	    }
885 
886 	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
887 	    {
888 	      set_mem_expr (x, 0);
889 	      set_mem_expr (y, 0);
890 	      clear_mem_offset (x);
891 	      clear_mem_offset (y);
892 	    }
893 	  else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
894 		   || (MEM_OFFSET_KNOWN_P (x)
895 		       && MEM_OFFSET (x) != MEM_OFFSET (y)))
896 	    {
897 	      clear_mem_offset (x);
898 	      clear_mem_offset (y);
899 	    }
900 
901 	  if (MEM_SIZE_KNOWN_P (x) && MEM_SIZE_KNOWN_P (y))
902 	    {
903 	      mem_size = MAX (MEM_SIZE (x), MEM_SIZE (y));
904 	      set_mem_size (x, mem_size);
905 	      set_mem_size (y, mem_size);
906 	    }
907 	  else
908 	    {
909 	      clear_mem_size (x);
910 	      clear_mem_size (y);
911 	    }
912 
913 	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
914 	  set_mem_align (y, MEM_ALIGN (x));
915 	}
916     }
917   if (code == MEM)
918     {
919       if (MEM_READONLY_P (x) != MEM_READONLY_P (y))
920 	{
921 	  MEM_READONLY_P (x) = 0;
922 	  MEM_READONLY_P (y) = 0;
923 	}
924       if (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y))
925 	{
926 	  MEM_NOTRAP_P (x) = 0;
927 	  MEM_NOTRAP_P (y) = 0;
928 	}
929       if (MEM_VOLATILE_P (x) != MEM_VOLATILE_P (y))
930 	{
931 	  MEM_VOLATILE_P (x) = 1;
932 	  MEM_VOLATILE_P (y) = 1;
933 	}
934     }
935 
936   fmt = GET_RTX_FORMAT (code);
937   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
938     {
939       switch (fmt[i])
940 	{
941 	case 'E':
942 	  /* Two vectors must have the same length.  */
943 	  if (XVECLEN (x, i) != XVECLEN (y, i))
944 	    return;
945 
946 	  for (j = 0; j < XVECLEN (x, i); j++)
947 	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
948 
949 	  break;
950 
951 	case 'e':
952 	  merge_memattrs (XEXP (x, i), XEXP (y, i));
953 	}
954     }
955   return;
956 }
957 
958 
959  /* Checks if patterns P1 and P2 are equivalent, apart from the possibly
960     different single sets S1 and S2.  */
961 
962 static bool
equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
964 {
965   int i;
966   rtx e1, e2;
967 
968   if (p1 == s1 && p2 == s2)
969     return true;
970 
971   if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
972     return false;
973 
974   if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
975     return false;
976 
977   for (i = 0; i < XVECLEN (p1, 0); i++)
978     {
979       e1 = XVECEXP (p1, 0, i);
980       e2 = XVECEXP (p2, 0, i);
981       if (e1 == s1 && e2 == s2)
982         continue;
983       if (reload_completed
984           ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
985         continue;
986 
987       return false;
988     }
989 
990   return true;
991 }
992 
993 
994 /* NOTE1 is the REG_EQUAL note, if any, attached to an insn
995    that is a single_set with a SET_SRC of SRC1.  Similarly
996    for NOTE2/SRC2.
997 
998    So effectively NOTE1/NOTE2 are an alternate form of
999    SRC1/SRC2 respectively.
1000 
1001    Return nonzero if SRC1 or NOTE1 has the same constant
1002    integer value as SRC2 or NOTE2.   Else return zero.  */
1003 static int
values_equal_p (rtx note1, rtx note2, rtx src1, rtx src2)
1005 {
1006   if (note1
1007       && note2
1008       && CONST_INT_P (XEXP (note1, 0))
1009       && rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0)))
1010     return 1;
1011 
1012   if (!note1
1013       && !note2
1014       && CONST_INT_P (src1)
1015       && CONST_INT_P (src2)
1016       && rtx_equal_p (src1, src2))
1017     return 1;
1018 
1019   if (note1
1020       && CONST_INT_P (src2)
1021       && rtx_equal_p (XEXP (note1, 0), src2))
1022     return 1;
1023 
1024   if (note2
1025       && CONST_INT_P (src1)
1026       && rtx_equal_p (XEXP (note2, 0), src1))
1027     return 1;
1028 
1029   return 0;
1030 }
1031 
1032 /* Examine register notes on I1 and I2 and return:
1033    - dir_forward if I1 can be replaced by I2, or
1034    - dir_backward if I2 can be replaced by I1, or
1035    - dir_both if both are the case.  */
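
/* For example (a sketch): if I1 is (set (reg A) (reg B)) carrying a
   REG_EQUAL note of (const_int 7), and I2 is (set (reg A) (const_int 7)),
   both insns store the same value in A, but only I1 can be replaced by I2
   (dir_forward): replacing I2 by I1 would require (reg B) to be live and
   hold 7 at I2's location, which we do not know.  */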
1036 
1037 static enum replace_direction
can_replace_by (rtx_insn *i1, rtx_insn *i2)
1039 {
1040   rtx s1, s2, d1, d2, src1, src2, note1, note2;
1041   bool c1, c2;
1042 
1043   /* Check for 2 sets.  */
1044   s1 = single_set (i1);
1045   s2 = single_set (i2);
1046   if (s1 == NULL_RTX || s2 == NULL_RTX)
1047     return dir_none;
1048 
1049   /* Check that the 2 sets set the same dest.  */
1050   d1 = SET_DEST (s1);
1051   d2 = SET_DEST (s2);
1052   if (!(reload_completed
1053         ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
1054     return dir_none;
1055 
  /* Find an identical reg_equiv or reg_equal note, which implies that the 2
     sets set dest to the same value.  */
1058   note1 = find_reg_equal_equiv_note (i1);
1059   note2 = find_reg_equal_equiv_note (i2);
1060 
1061   src1 = SET_SRC (s1);
1062   src2 = SET_SRC (s2);
1063 
1064   if (!values_equal_p (note1, note2, src1, src2))
1065     return dir_none;
1066 
1067   if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
1068     return dir_none;
1069 
1070   /* Although the 2 sets set dest to the same value, we cannot replace
1071        (set (dest) (const_int))
1072      by
1073        (set (dest) (reg))
1074      because we don't know if the reg is live and has the same value at the
1075      location of replacement.  */
1076   c1 = CONST_INT_P (src1);
1077   c2 = CONST_INT_P (src2);
1078   if (c1 && c2)
1079     return dir_both;
1080   else if (c2)
1081     return dir_forward;
1082   else if (c1)
1083     return dir_backward;
1084 
1085   return dir_none;
1086 }
1087 
1088 /* Merges directions A and B.  */
1089 
1090 static enum replace_direction
merge_dir (enum replace_direction a, enum replace_direction b)
1092 {
1093   /* Implements the following table:
1094         |bo fw bw no
1095      ---+-----------
1096      bo |bo fw bw no
1097      fw |-- fw no no
1098      bw |-- -- bw no
1099      no |-- -- -- no.  */
1100 
1101   if (a == b)
1102     return a;
1103 
1104   if (a == dir_both)
1105     return b;
1106   if (b == dir_both)
1107     return a;
1108 
1109   return dir_none;
1110 }
1111 
1112 /* Examine I1 and I2 and return:
1113    - dir_forward if I1 can be replaced by I2, or
1114    - dir_backward if I2 can be replaced by I1, or
1115    - dir_both if both are the case.  */
1116 
1117 static enum replace_direction
old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx_insn *i1, rtx_insn *i2)
1119 {
1120   rtx p1, p2;
1121 
1122   /* Verify that I1 and I2 are equivalent.  */
1123   if (GET_CODE (i1) != GET_CODE (i2))
1124     return dir_none;
1125 
1126   /* __builtin_unreachable() may lead to empty blocks (ending with
1127      NOTE_INSN_BASIC_BLOCK).  They may be crossjumped. */
1128   if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
1129     return dir_both;
1130 
1131   /* ??? Do not allow cross-jumping between different stack levels.  */
1132   p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
1133   p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
1134   if (p1 && p2)
1135     {
1136       p1 = XEXP (p1, 0);
1137       p2 = XEXP (p2, 0);
1138       if (!rtx_equal_p (p1, p2))
1139         return dir_none;
1140 
1141       /* ??? Worse, this adjustment had better be constant lest we
1142          have differing incoming stack levels.  */
1143       if (!frame_pointer_needed
1144           && find_args_size_adjust (i1) == HOST_WIDE_INT_MIN)
1145 	return dir_none;
1146     }
1147   else if (p1 || p2)
1148     return dir_none;
1149 
1150   p1 = PATTERN (i1);
1151   p2 = PATTERN (i2);
1152 
1153   if (GET_CODE (p1) != GET_CODE (p2))
1154     return dir_none;
1155 
1156   /* If this is a CALL_INSN, compare register usage information.
1157      If we don't check this on stack register machines, the two
1158      CALL_INSNs might be merged leaving reg-stack.c with mismatching
1159      numbers of stack registers in the same basic block.
1160      If we don't check this on machines with delay slots, a delay slot may
1161      be filled that clobbers a parameter expected by the subroutine.
1162 
1163      ??? We take the simple route for now and assume that if they're
1164      equal, they were constructed identically.
1165 
1166      Also check for identical exception regions.  */
1167 
1168   if (CALL_P (i1))
1169     {
1170       /* Ensure the same EH region.  */
1171       rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
1172       rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);
1173 
1174       if (!n1 && n2)
1175 	return dir_none;
1176 
1177       if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1178 	return dir_none;
1179 
1180       if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
1181 			CALL_INSN_FUNCTION_USAGE (i2))
1182 	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
1183 	return dir_none;
1184 
1185       /* For address sanitizer, never crossjump __asan_report_* builtins,
1186 	 otherwise errors might be reported on incorrect lines.  */
1187       if (flag_sanitize & SANITIZE_ADDRESS)
1188 	{
1189 	  rtx call = get_call_rtx_from (i1);
1190 	  if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
1191 	    {
1192 	      rtx symbol = XEXP (XEXP (call, 0), 0);
1193 	      if (SYMBOL_REF_DECL (symbol)
1194 		  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
1195 		{
1196 		  if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
1197 		       == BUILT_IN_NORMAL)
1198 		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
1199 			 >= BUILT_IN_ASAN_REPORT_LOAD1
1200 		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
1201 			 <= BUILT_IN_ASAN_STOREN)
1202 		    return dir_none;
1203 		}
1204 	    }
1205 	}
1206     }
1207 
1208 #ifdef STACK_REGS
1209   /* If cross_jump_death_matters is not 0, the insn's mode
1210      indicates whether or not the insn contains any stack-like
1211      regs.  */
1212 
1213   if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1214     {
1215       /* If register stack conversion has already been done, then
1216 	 death notes must also be compared before it is certain that
1217 	 the two instruction streams match.  */
1218 
1219       rtx note;
1220       HARD_REG_SET i1_regset, i2_regset;
1221 
1222       CLEAR_HARD_REG_SET (i1_regset);
1223       CLEAR_HARD_REG_SET (i2_regset);
1224 
1225       for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1226 	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1227 	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1228 
1229       for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1230 	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1231 	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1232 
1233       if (!hard_reg_set_equal_p (i1_regset, i2_regset))
1234 	return dir_none;
1235     }
1236 #endif
1237 
1238   if (reload_completed
1239       ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1240     return dir_both;
1241 
1242   return can_replace_by (i1, i2);
1243 }
1244 
1245 /* When comparing insns I1 and I2 in flow_find_cross_jump or
1246    flow_find_head_matching_sequence, ensure the notes match.  */
1247 
1248 static void
merge_notes (rtx_insn *i1, rtx_insn *i2)
1250 {
1251   /* If the merged insns have different REG_EQUAL notes, then
1252      remove them.  */
1253   rtx equiv1 = find_reg_equal_equiv_note (i1);
1254   rtx equiv2 = find_reg_equal_equiv_note (i2);
1255 
1256   if (equiv1 && !equiv2)
1257     remove_note (i1, equiv1);
1258   else if (!equiv1 && equiv2)
1259     remove_note (i2, equiv2);
1260   else if (equiv1 && equiv2
1261 	   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1262     {
1263       remove_note (i1, equiv1);
1264       remove_note (i2, equiv2);
1265     }
1266 }
1267 
 /* Walk from I1 in BB1 backward to the next non-debug insn, and return the
    resulting insn in I1 and the corresponding bb in BB1.  At the head of a
    bb, if there is a predecessor bb that reaches this bb via fallthru and
    FOLLOW_FALLTHRU is true, walk further into the predecessor bb and record
    this in DID_FALLTHRU.  Otherwise, stop at the head of the bb.  */
1273 
1274 static void
walk_to_nondebug_insn (rtx_insn **i1, basic_block *bb1, bool follow_fallthru,
1276                        bool *did_fallthru)
1277 {
1278   edge fallthru;
1279 
1280   *did_fallthru = false;
1281 
1282   /* Ignore notes.  */
1283   while (!NONDEBUG_INSN_P (*i1))
1284     {
1285       if (*i1 != BB_HEAD (*bb1))
1286         {
1287           *i1 = PREV_INSN (*i1);
1288           continue;
1289         }
1290 
1291       if (!follow_fallthru)
1292         return;
1293 
1294       fallthru = find_fallthru_edge ((*bb1)->preds);
1295       if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1296           || !single_succ_p (fallthru->src))
1297         return;
1298 
1299       *bb1 = fallthru->src;
1300       *i1 = BB_END (*bb1);
1301       *did_fallthru = true;
1302      }
1303 }
1304 
1305 /* Look through the insns at the end of BB1 and BB2 and find the longest
1306    sequence that are either equivalent, or allow forward or backward
1307    replacement.  Store the first insns for that sequence in *F1 and *F2 and
1308    return the sequence length.
1309 
1310    DIR_P indicates the allowed replacement direction on function entry, and
1311    the actual replacement direction on function exit.  If NULL, only equivalent
1312    sequences are allowed.
1313 
1314    To simplify callers of this function, if the blocks match exactly,
1315    store the head of the blocks in *F1 and *F2.  */
1316 
1317 int
flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx_insn **f1,
1319 		      rtx_insn **f2, enum replace_direction *dir_p)
1320 {
1321   rtx_insn *i1, *i2, *last1, *last2, *afterlast1, *afterlast2;
1322   int ninsns = 0;
1323   enum replace_direction dir, last_dir, afterlast_dir;
1324   bool follow_fallthru, did_fallthru;
1325 
1326   if (dir_p)
1327     dir = *dir_p;
1328   else
1329     dir = dir_both;
1330   afterlast_dir = dir;
1331   last_dir = afterlast_dir;
1332 
1333   /* Skip simple jumps at the end of the blocks.  Complex jumps still
1334      need to be compared for equivalence, which we'll do below.  */
1335 
1336   i1 = BB_END (bb1);
1337   last1 = afterlast1 = last2 = afterlast2 = NULL;
1338   if (onlyjump_p (i1)
1339       || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1340     {
1341       last1 = i1;
1342       i1 = PREV_INSN (i1);
1343     }
1344 
1345   i2 = BB_END (bb2);
1346   if (onlyjump_p (i2)
1347       || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1348     {
1349       last2 = i2;
1350       /* Count everything except for unconditional jump as insn.
1351 	 Don't count any jumps if dir_p is NULL.  */
1352       if (!simplejump_p (i2) && !returnjump_p (i2) && last1 && dir_p)
1353 	ninsns++;
1354       i2 = PREV_INSN (i2);
1355     }
1356 
1357   while (true)
1358     {
1359       /* In the following example, we can replace all jumps to C by jumps to A.
1360 
1361          This removes 4 duplicate insns.
1362          [bb A] insn1            [bb C] insn1
1363                 insn2                   insn2
1364          [bb B] insn3                   insn3
1365                 insn4                   insn4
1366                 jump_insn               jump_insn
1367 
1368          We could also replace all jumps to A by jumps to C, but that leaves B
1369          alive, and removes only 2 duplicate insns.  In a subsequent crossjump
1370          step, all jumps to B would be replaced with jumps to the middle of C,
1371          achieving the same result with more effort.
1372          So we allow only the first possibility, which means that we don't allow
1373          fallthru in the block that's being replaced.  */
1374 
1375       follow_fallthru = dir_p && dir != dir_forward;
1376       walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
1377       if (did_fallthru)
1378         dir = dir_backward;
1379 
1380       follow_fallthru = dir_p && dir != dir_backward;
1381       walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
1382       if (did_fallthru)
1383         dir = dir_forward;
1384 
1385       if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1386 	break;
1387 
1388       dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
1389       if (dir == dir_none || (!dir_p && dir != dir_both))
1390 	break;
1391 
1392       merge_memattrs (i1, i2);
1393 
1394       /* Don't begin a cross-jump with a NOTE insn.  */
1395       if (INSN_P (i1))
1396 	{
1397 	  merge_notes (i1, i2);
1398 
1399 	  afterlast1 = last1, afterlast2 = last2;
1400 	  last1 = i1, last2 = i2;
1401 	  afterlast_dir = last_dir;
1402 	  last_dir = dir;
1403 	  if (active_insn_p (i1))
1404 	    ninsns++;
1405 	}
1406 
1407       i1 = PREV_INSN (i1);
1408       i2 = PREV_INSN (i2);
1409     }
1410 
1411   /* Don't allow the insn after a compare to be shared by
1412      cross-jumping unless the compare is also shared.  */
1413   if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
1414       && ! sets_cc0_p (last1))
1415     last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;
1416 
1417   /* Include preceding notes and labels in the cross-jump.  One,
1418      this may bring us to the head of the blocks as requested above.
1419      Two, it keeps line number notes as matched as may be.  */
1420   if (ninsns)
1421     {
1422       bb1 = BLOCK_FOR_INSN (last1);
1423       while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
1424 	last1 = PREV_INSN (last1);
1425 
1426       if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1427 	last1 = PREV_INSN (last1);
1428 
1429       bb2 = BLOCK_FOR_INSN (last2);
1430       while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
1431 	last2 = PREV_INSN (last2);
1432 
1433       if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1434 	last2 = PREV_INSN (last2);
1435 
1436       *f1 = last1;
1437       *f2 = last2;
1438     }
1439 
1440   if (dir_p)
1441     *dir_p = last_dir;
1442   return ninsns;
1443 }
1444 
1445 /* Like flow_find_cross_jump, except start looking for a matching sequence from
1446    the head of the two blocks.  Do not include jumps at the end.
1447    If STOP_AFTER is nonzero, stop after finding that many matching
   instructions.  If STOP_AFTER is zero, count all INSN_P insns; if it is
   nonzero, count only active insns.  */
1450 
1451 int
flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx_insn **f1,
1453 				  rtx_insn **f2, int stop_after)
1454 {
1455   rtx_insn *i1, *i2, *last1, *last2, *beforelast1, *beforelast2;
1456   int ninsns = 0;
1457   edge e;
1458   edge_iterator ei;
1459   int nehedges1 = 0, nehedges2 = 0;
1460 
1461   FOR_EACH_EDGE (e, ei, bb1->succs)
1462     if (e->flags & EDGE_EH)
1463       nehedges1++;
1464   FOR_EACH_EDGE (e, ei, bb2->succs)
1465     if (e->flags & EDGE_EH)
1466       nehedges2++;
1467 
1468   i1 = BB_HEAD (bb1);
1469   i2 = BB_HEAD (bb2);
1470   last1 = beforelast1 = last2 = beforelast2 = NULL;
1471 
1472   while (true)
1473     {
1474       /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG.  */
1475       while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
1476 	{
1477 	  if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
1478 	    break;
1479 	  i1 = NEXT_INSN (i1);
1480 	}
1481 
1482       while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
1483 	{
1484 	  if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
1485 	    break;
1486 	  i2 = NEXT_INSN (i2);
1487 	}
1488 
1489       if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
1490 	  || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
1491 	break;
1492 
1493       if (NOTE_P (i1) || NOTE_P (i2)
1494 	  || JUMP_P (i1) || JUMP_P (i2))
1495 	break;
1496 
1497       /* A sanity check to make sure we're not merging insns with different
1498 	 effects on EH.  If only one of them ends a basic block, it shouldn't
1499 	 have an EH edge; if both end a basic block, there should be the same
1500 	 number of EH edges.  */
1501       if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
1502 	   && nehedges1 > 0)
1503 	  || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
1504 	      && nehedges2 > 0)
1505 	  || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
1506 	      && nehedges1 != nehedges2))
1507 	break;
1508 
1509       if (old_insns_match_p (0, i1, i2) != dir_both)
1510 	break;
1511 
1512       merge_memattrs (i1, i2);
1513 
1514       /* Don't begin a cross-jump with a NOTE insn.  */
1515       if (INSN_P (i1))
1516 	{
1517 	  merge_notes (i1, i2);
1518 
1519 	  beforelast1 = last1, beforelast2 = last2;
1520 	  last1 = i1, last2 = i2;
1521 	  if (!stop_after || active_insn_p (i1))
1522 	    ninsns++;
1523 	}
1524 
1525       if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
1526 	  || (stop_after > 0 && ninsns == stop_after))
1527 	break;
1528 
1529       i1 = NEXT_INSN (i1);
1530       i2 = NEXT_INSN (i2);
1531     }
1532 
1533   /* Don't allow a compare to be shared by cross-jumping unless the insn
1534      after the compare is also shared.  */
1535   if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
1536       && sets_cc0_p (last1))
1537     last1 = beforelast1, last2 = beforelast2, ninsns--;
1538 
1539   if (ninsns)
1540     {
1541       *f1 = last1;
1542       *f2 = last2;
1543     }
1544 
1545   return ninsns;
1546 }
1547 
1548 /* Return true iff outgoing edges of BB1 and BB2 match, together with
1549    the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.
1551 
1552    We may assume that there exists one edge with a common destination.  */
1553 
1554 static bool
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1556 {
1557   int nehedges1 = 0, nehedges2 = 0;
1558   edge fallthru1 = 0, fallthru2 = 0;
1559   edge e1, e2;
1560   edge_iterator ei;
1561 
1562   /* If we performed shrink-wrapping, edges to the exit block can
1563      only be distinguished for JUMP_INSNs.  The two paths may differ in
1564      whether they went through the prologue.  Sibcalls are fine, we know
1565      that we either didn't need or inserted an epilogue before them.  */
1566   if (crtl->shrink_wrapped
1567       && single_succ_p (bb1)
1568       && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun)
1569       && !JUMP_P (BB_END (bb1))
1570       && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
1571     return false;
1572 
1573   /* If BB1 has only one successor, we may be looking at either an
1574      unconditional jump, or a fake edge to exit.  */
1575   if (single_succ_p (bb1)
1576       && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1577       && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1578     return (single_succ_p (bb2)
1579 	    && (single_succ_edge (bb2)->flags
1580 		& (EDGE_COMPLEX | EDGE_FAKE)) == 0
1581 	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1582 
1583   /* Match conditional jumps - this may get tricky when fallthru and branch
1584      edges are crossed.  */
1585   if (EDGE_COUNT (bb1->succs) == 2
1586       && any_condjump_p (BB_END (bb1))
1587       && onlyjump_p (BB_END (bb1)))
1588     {
1589       edge b1, f1, b2, f2;
1590       bool reverse, match;
1591       rtx set1, set2, cond1, cond2;
1592       enum rtx_code code1, code2;
1593 
1594       if (EDGE_COUNT (bb2->succs) != 2
1595 	  || !any_condjump_p (BB_END (bb2))
1596 	  || !onlyjump_p (BB_END (bb2)))
1597 	return false;
1598 
1599       b1 = BRANCH_EDGE (bb1);
1600       b2 = BRANCH_EDGE (bb2);
1601       f1 = FALLTHRU_EDGE (bb1);
1602       f2 = FALLTHRU_EDGE (bb2);
1603 
1604       /* Get around possible forwarders on fallthru edges.  Other cases
1605 	 should be optimized out already.  */
1606       if (FORWARDER_BLOCK_P (f1->dest))
1607 	f1 = single_succ_edge (f1->dest);
1608 
1609       if (FORWARDER_BLOCK_P (f2->dest))
1610 	f2 = single_succ_edge (f2->dest);
1611 
1612       /* To simplify use of this function, return false if there are
1613 	 unneeded forwarder blocks.  These will get eliminated later
1614 	 during cleanup_cfg.  */
1615       if (FORWARDER_BLOCK_P (f1->dest)
1616 	  || FORWARDER_BLOCK_P (f2->dest)
1617 	  || FORWARDER_BLOCK_P (b1->dest)
1618 	  || FORWARDER_BLOCK_P (b2->dest))
1619 	return false;
1620 
1621       if (f1->dest == f2->dest && b1->dest == b2->dest)
1622 	reverse = false;
1623       else if (f1->dest == b2->dest && b1->dest == f2->dest)
1624 	reverse = true;
1625       else
1626 	return false;
1627 
1628       set1 = pc_set (BB_END (bb1));
1629       set2 = pc_set (BB_END (bb2));
1630       if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1631 	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1632 	reverse = !reverse;
1633 
1634       cond1 = XEXP (SET_SRC (set1), 0);
1635       cond2 = XEXP (SET_SRC (set2), 0);
1636       code1 = GET_CODE (cond1);
1637       if (reverse)
1638 	code2 = reversed_comparison_code (cond2, BB_END (bb2));
1639       else
1640 	code2 = GET_CODE (cond2);
1641 
1642       if (code2 == UNKNOWN)
1643 	return false;
1644 
1645       /* Verify codes and operands match.  */
1646       match = ((code1 == code2
1647 		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1648 		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1649 	       || (code1 == swap_condition (code2)
1650 		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
1651 					      XEXP (cond2, 0))
1652 		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
1653 					      XEXP (cond2, 1))));
1654 
1655       /* If we return true, we will join the blocks, which means that
1656 	 we will only have one branch prediction bit to work with.  Thus
1657 	 we require the existing branches to have probabilities that are
1658 	 roughly similar.  */
1659       if (match
1660 	  && optimize_bb_for_speed_p (bb1)
1661 	  && optimize_bb_for_speed_p (bb2))
1662 	{
1663 	  int prob2;
1664 
1665 	  if (b1->dest == b2->dest)
1666 	    prob2 = b2->probability;
1667 	  else
1668 	    /* Do not use f2 probability as f2 may be forwarded.  */
1669 	    prob2 = REG_BR_PROB_BASE - b2->probability;
1670 
1671 	  /* Fail if the difference in probabilities is greater than 50%.
1672 	     This rules out two well-predicted branches with opposite
1673 	     outcomes.  */
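	  /* Illustrative example (the numbers are hypothetical): with
	     REG_BR_PROB_BASE of 10000, a branch taken with probability 9000
	     in BB1 and 2000 in BB2 gives abs (9000 - 2000) == 7000, which
	     exceeds REG_BR_PROB_BASE / 2 == 5000, so the blocks are not
	     joined even though the conditions match.  */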
1674 	  if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1675 	    {
1676 	      if (dump_file)
1677 		fprintf (dump_file,
1678 			 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1679 			 bb1->index, bb2->index, b1->probability, prob2);
1680 
1681 	      return false;
1682 	    }
1683 	}
1684 
1685       if (dump_file && match)
1686 	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1687 		 bb1->index, bb2->index);
1688 
1689       return match;
1690     }
1691 
1692   /* Generic case - we are seeing a computed jump, table jump or trapping
1693      instruction.  */
1694 
1695   /* Check whether there are tablejumps in the end of BB1 and BB2.
1696      Return true if they are identical.  */
1697     {
1698       rtx label1, label2;
1699       rtx_jump_table_data *table1, *table2;
1700 
1701       if (tablejump_p (BB_END (bb1), &label1, &table1)
1702 	  && tablejump_p (BB_END (bb2), &label2, &table2)
1703 	  && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1704 	{
1705 	  /* The labels should never be the same rtx.  If they really are the same,
1706 	     the jump tables are the same too.  So disable crossjumping of blocks BB1
1707 	     and BB2 because when deleting the common insns in the end of BB1
1708 	     by delete_basic_block () the jump table would be deleted too.  */
1709 	  /* If LABEL2 is referenced in BB1->END do not do anything
1710 	     because we would lose information when replacing
1711 	     LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
1712 	  if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1713 	    {
1714 	      /* Set IDENTICAL to true when the tables are identical.  */
1715 	      bool identical = false;
1716 	      rtx p1, p2;
1717 
1718 	      p1 = PATTERN (table1);
1719 	      p2 = PATTERN (table2);
1720 	      if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1721 		{
1722 		  identical = true;
1723 		}
1724 	      else if (GET_CODE (p1) == ADDR_DIFF_VEC
1725 		       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1726 		       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1727 		       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1728 		{
1729 		  int i;
1730 
1731 		  identical = true;
1732 		  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1733 		    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1734 		      identical = false;
1735 		}
1736 
1737 	      if (identical)
1738 		{
1739 		  bool match;
1740 
1741 		  /* Temporarily replace references to LABEL1 with LABEL2
1742 		     in BB1->END so that we could compare the instructions.  */
1743 		  replace_label_in_insn (BB_END (bb1), label1, label2, false);
1744 
1745 		  match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
1746 			   == dir_both);
1747 		  if (dump_file && match)
1748 		    fprintf (dump_file,
1749 			     "Tablejumps in bb %i and %i match.\n",
1750 			     bb1->index, bb2->index);
1751 
1752 		  /* Set the original label in BB1->END because when deleting
1753 		     a block whose end is a tablejump, the tablejump referenced
1754 		     from the instruction is deleted too.  */
1755 		  replace_label_in_insn (BB_END (bb1), label2, label1, false);
1756 
1757 		  return match;
1758 		}
1759 	    }
1760 	  return false;
1761 	}
1762     }
1763 
1764   /* Find the last non-debug non-note instruction in each bb, except
1765      stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p
1766      handles that case specially. old_insns_match_p does not handle
1767      other types of instruction notes.  */
1768   rtx_insn *last1 = BB_END (bb1);
1769   rtx_insn *last2 = BB_END (bb2);
1770   while (!NOTE_INSN_BASIC_BLOCK_P (last1) &&
1771          (DEBUG_INSN_P (last1) || NOTE_P (last1)))
1772     last1 = PREV_INSN (last1);
1773   while (!NOTE_INSN_BASIC_BLOCK_P (last2) &&
1774          (DEBUG_INSN_P (last2) || NOTE_P (last2)))
1775     last2 = PREV_INSN (last2);
1776   gcc_assert (last1 && last2);
1777 
1778   /* First ensure that the instructions match.  There may be many outgoing
1779      edges so this test is generally cheaper.  */
1780   if (old_insns_match_p (mode, last1, last2) != dir_both)
1781     return false;
1782 
1783   /* Search the outgoing edges, ensure that the counts do match, find possible
1784      fallthru and exception handling edges since these need more
1785      validation.  */
1786   if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1787     return false;
1788 
1789   bool nonfakeedges = false;
1790   FOR_EACH_EDGE (e1, ei, bb1->succs)
1791     {
1792       e2 = EDGE_SUCC (bb2, ei.index);
1793 
1794       if ((e1->flags & EDGE_FAKE) == 0)
1795 	nonfakeedges = true;
1796 
1797       if (e1->flags & EDGE_EH)
1798 	nehedges1++;
1799 
1800       if (e2->flags & EDGE_EH)
1801 	nehedges2++;
1802 
1803       if (e1->flags & EDGE_FALLTHRU)
1804 	fallthru1 = e1;
1805       if (e2->flags & EDGE_FALLTHRU)
1806 	fallthru2 = e2;
1807     }
1808 
1809   /* If number of edges of various types does not match, fail.  */
1810   if (nehedges1 != nehedges2
1811       || (fallthru1 != 0) != (fallthru2 != 0))
1812     return false;
1813 
1814   /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors
1815      and the last real insn doesn't have REG_ARGS_SIZE note, don't
1816      attempt to optimize, as the two basic blocks might have different
1817      REG_ARGS_SIZE depths.  For noreturn calls and unconditional
1818      traps there should be REG_ARGS_SIZE notes; they could be missing
1819      for __builtin_unreachable () uses though.  */
1820   if (!nonfakeedges
1821       && !ACCUMULATE_OUTGOING_ARGS
1822       && (!INSN_P (last1)
1823           || !find_reg_note (last1, REG_ARGS_SIZE, NULL)))
1824     return false;
1825 
1826   /* fallthru edges must be forwarded to the same destination.  */
1827   if (fallthru1)
1828     {
1829       basic_block d1 = (forwarder_block_p (fallthru1->dest)
1830 			? single_succ (fallthru1->dest): fallthru1->dest);
1831       basic_block d2 = (forwarder_block_p (fallthru2->dest)
1832 			? single_succ (fallthru2->dest): fallthru2->dest);
1833 
1834       if (d1 != d2)
1835 	return false;
1836     }
1837 
1838   /* Ensure the same EH region.  */
1839   {
1840     rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1841     rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1842 
1843     if (!n1 && n2)
1844       return false;
1845 
1846     if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1847       return false;
1848   }
1849 
1850   /* The same checks as in try_crossjump_to_edge.  They are required for the
1851      RTL version of sequence abstraction.  */
1852   FOR_EACH_EDGE (e1, ei, bb2->succs)
1853     {
1854       edge e2;
1855       edge_iterator ei;
1856       basic_block d1 = e1->dest;
1857 
1858       if (FORWARDER_BLOCK_P (d1))
1859         d1 = EDGE_SUCC (d1, 0)->dest;
1860 
1861       FOR_EACH_EDGE (e2, ei, bb1->succs)
1862         {
1863           basic_block d2 = e2->dest;
1864           if (FORWARDER_BLOCK_P (d2))
1865             d2 = EDGE_SUCC (d2, 0)->dest;
1866           if (d1 == d2)
1867             break;
1868         }
1869 
1870       if (!e2)
1871         return false;
1872     }
1873 
1874   return true;
1875 }
1876 
1877 /* Return true if basic block BB has a label that must be preserved.  */
1878 
1879 static bool
1880 block_has_preserve_label (basic_block bb)
1881 {
1882   return (bb
1883           && block_label (bb)
1884           && LABEL_PRESERVE_P (block_label (bb)));
1885 }
1886 
1887 /* E1 and E2 are edges with the same destination block.  Search their
1888    predecessors for common code.  If found, redirect control flow from
1889    (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
1890    or the other way around (dir_backward).  DIR specifies the allowed
1891    replacement direction.  */
1892 
1893 static bool
1894 try_crossjump_to_edge (int mode, edge e1, edge e2,
1895                        enum replace_direction dir)
1896 {
1897   int nmatch;
1898   basic_block src1 = e1->src, src2 = e2->src;
1899   basic_block redirect_to, redirect_from, to_remove;
1900   basic_block osrc1, osrc2, redirect_edges_to, tmp;
1901   rtx_insn *newpos1, *newpos2;
1902   edge s;
1903   edge_iterator ei;
1904 
1905   newpos1 = newpos2 = NULL;
1906 
1907   /* If we have partitioned hot/cold basic blocks, it is a bad idea
1908      to try this optimization.
1909 
1910      Basic block partitioning may result in some jumps that appear to
1911      be optimizable (or blocks that appear to be mergeable), but which really
1912      must be left untouched (they are required to make it safely across
1913      partition boundaries).  See the comments at the top of
1914      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
1915 
1916   if (crtl->has_bb_partition && reload_completed)
1917     return false;
1918 
1919   /* Search backward through forwarder blocks.  We don't need to worry
1920      about multiple entry or chained forwarders, as they will be optimized
1921      away.  We do this to look past the unconditional jump following a
1922      conditional jump that is required due to the current CFG shape.  */
1923   if (single_pred_p (src1)
1924       && FORWARDER_BLOCK_P (src1))
1925     e1 = single_pred_edge (src1), src1 = e1->src;
1926 
1927   if (single_pred_p (src2)
1928       && FORWARDER_BLOCK_P (src2))
1929     e2 = single_pred_edge (src2), src2 = e2->src;
1930 
1931   /* Nothing to do if we reach ENTRY, or a common source block.  */
1932   if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1933       || src2 == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1934     return false;
1935   if (src1 == src2)
1936     return false;
1937 
1938   /* Seeing more than one forwarder block would confuse us later...  */
1939   if (FORWARDER_BLOCK_P (e1->dest)
1940       && FORWARDER_BLOCK_P (single_succ (e1->dest)))
1941     return false;
1942 
1943   if (FORWARDER_BLOCK_P (e2->dest)
1944       && FORWARDER_BLOCK_P (single_succ (e2->dest)))
1945     return false;
1946 
1947   /* Likewise with dead code (possibly newly created by the other optimizations
1948      of cleanup_cfg).  */
1949   if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1950     return false;
1951 
1952   /* Look for the common insn sequence, part the first ...  */
1953   if (!outgoing_edges_match (mode, src1, src2))
1954     return false;
1955 
1956   /* ... and part the second.  */
1957   nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);
1958 
1959   osrc1 = src1;
1960   osrc2 = src2;
1961   if (newpos1 != NULL_RTX)
1962     src1 = BLOCK_FOR_INSN (newpos1);
1963   if (newpos2 != NULL_RTX)
1964     src2 = BLOCK_FOR_INSN (newpos2);
1965 
1966   if (dir == dir_backward)
1967     {
1968 #define SWAP(T, X, Y) do { T tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
1969       SWAP (basic_block, osrc1, osrc2);
1970       SWAP (basic_block, src1, src2);
1971       SWAP (edge, e1, e2);
1972       SWAP (rtx_insn *, newpos1, newpos2);
1973 #undef SWAP
1974     }
1975 
1976   /* Don't proceed with the crossjump unless we found a sufficient number
1977      of matching instructions or the 'from' block was totally matched
1978      (such that its predecessors will hopefully be redirected and the
1979      block removed).  */
1980   if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
1981       && (newpos1 != BB_HEAD (src1)))
1982     return false;
1983 
1984   /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
1985   if (block_has_preserve_label (e1->dest)
1986       && (e1->flags & EDGE_ABNORMAL))
1987     return false;
1988 
1989   /* Here we know that the insns in the end of SRC1 which are common with SRC2
1990      will be deleted.
1991      If we have tablejumps in the end of SRC1 and SRC2
1992      they have been already compared for equivalence in outgoing_edges_match ()
1993      so replace the references to TABLE1 by references to TABLE2.  */
1994   {
1995       rtx label1, label2;
1996       rtx_jump_table_data *table1, *table2;
1997 
1998       if (tablejump_p (BB_END (osrc1), &label1, &table1)
1999 	  && tablejump_p (BB_END (osrc2), &label2, &table2)
2000 	  && label1 != label2)
2001 	{
2002 	  rtx_insn *insn;
2003 
2004 	  /* Replace references to LABEL1 with LABEL2.  */
2005 	  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2006 	    {
2007 	      /* Do not replace the label in SRC1->END because when deleting
2008 		 a block whose end is a tablejump, the tablejump referenced
2009 		 from the instruction is deleted too.  */
2010 	      if (insn != BB_END (osrc1))
2011 		replace_label_in_insn (insn, label1, label2, true);
2012 	    }
2013 	}
2014   }
2015 
2016   /* Avoid splitting if possible.  We must always split when SRC2 has
2017      EH predecessor edges, or we may end up with basic blocks with both
2018      normal and EH predecessor edges.  */
2019   if (newpos2 == BB_HEAD (src2)
2020       && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
2021     redirect_to = src2;
2022   else
2023     {
2024       if (newpos2 == BB_HEAD (src2))
2025 	{
2026 	  /* Skip possible basic block header.  */
2027 	  if (LABEL_P (newpos2))
2028 	    newpos2 = NEXT_INSN (newpos2);
2029 	  while (DEBUG_INSN_P (newpos2))
2030 	    newpos2 = NEXT_INSN (newpos2);
2031 	  if (NOTE_P (newpos2))
2032 	    newpos2 = NEXT_INSN (newpos2);
2033 	  while (DEBUG_INSN_P (newpos2))
2034 	    newpos2 = NEXT_INSN (newpos2);
2035 	}
2036 
2037       if (dump_file)
2038 	fprintf (dump_file, "Splitting bb %i before %i insns\n",
2039 		 src2->index, nmatch);
2040       redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
2041     }
2042 
2043   if (dump_file)
2044     fprintf (dump_file,
2045 	     "Cross jumping from bb %i to bb %i; %i common insns\n",
2046 	     src1->index, src2->index, nmatch);
2047 
2048   /* We may have some registers visible through the block.  */
2049   df_set_bb_dirty (redirect_to);
2050 
2051   if (osrc2 == src2)
2052     redirect_edges_to = redirect_to;
2053   else
2054     redirect_edges_to = osrc2;
2055 
2056   /* Recompute the frequencies and counts of outgoing edges.  */
2057   FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
2058     {
2059       edge s2;
2060       edge_iterator ei;
2061       basic_block d = s->dest;
2062 
2063       if (FORWARDER_BLOCK_P (d))
2064 	d = single_succ (d);
2065 
2066       FOR_EACH_EDGE (s2, ei, src1->succs)
2067 	{
2068 	  basic_block d2 = s2->dest;
2069 	  if (FORWARDER_BLOCK_P (d2))
2070 	    d2 = single_succ (d2);
2071 	  if (d == d2)
2072 	    break;
2073 	}
2074 
2075       s->count += s2->count;
2076 
2077       /* Take care to update possible forwarder blocks.  We verified
2078 	 that there is no more than one in the chain, so we can't run
2079 	 into an infinite loop.  */
2080       if (FORWARDER_BLOCK_P (s->dest))
2081 	{
2082 	  single_succ_edge (s->dest)->count += s2->count;
2083 	  s->dest->count += s2->count;
2084 	  s->dest->frequency += EDGE_FREQUENCY (s);
2085 	}
2086 
2087       if (FORWARDER_BLOCK_P (s2->dest))
2088 	{
2089 	  single_succ_edge (s2->dest)->count -= s2->count;
2090 	  if (single_succ_edge (s2->dest)->count < 0)
2091 	    single_succ_edge (s2->dest)->count = 0;
2092 	  s2->dest->count -= s2->count;
2093 	  s2->dest->frequency -= EDGE_FREQUENCY (s);
2094 	  if (s2->dest->frequency < 0)
2095 	    s2->dest->frequency = 0;
2096 	  if (s2->dest->count < 0)
2097 	    s2->dest->count = 0;
2098 	}
2099 
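      /* Combine the outgoing edge probabilities as an average weighted by
	 the execution frequencies of the two merged blocks; if both
	 frequencies are zero, fall back to a simple arithmetic mean.  */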
2100       if (!redirect_edges_to->frequency && !src1->frequency)
2101 	s->probability = (s->probability + s2->probability) / 2;
2102       else
2103 	s->probability
2104 	  = ((s->probability * redirect_edges_to->frequency +
2105 	      s2->probability * src1->frequency)
2106 	     / (redirect_edges_to->frequency + src1->frequency));
2107     }
2108 
2109   /* Adjust count and frequency for the block.  An earlier jump
2110      threading pass may have left the profile in an inconsistent
2111      state (see update_bb_profile_for_threading) so we must be
2112      prepared for overflows.  */
2113   tmp = redirect_to;
2114   do
2115     {
2116       tmp->count += src1->count;
2117       tmp->frequency += src1->frequency;
2118       if (tmp->frequency > BB_FREQ_MAX)
2119         tmp->frequency = BB_FREQ_MAX;
2120       if (tmp == redirect_edges_to)
2121         break;
2122       tmp = find_fallthru_edge (tmp->succs)->dest;
2123     }
2124   while (true);
2125   update_br_prob_note (redirect_edges_to);
2126 
2127   /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */
2128 
2129   /* Skip possible basic block header.  */
2130   if (LABEL_P (newpos1))
2131     newpos1 = NEXT_INSN (newpos1);
2132 
2133   while (DEBUG_INSN_P (newpos1))
2134     newpos1 = NEXT_INSN (newpos1);
2135 
2136   if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
2137     newpos1 = NEXT_INSN (newpos1);
2138 
2139   while (DEBUG_INSN_P (newpos1))
2140     newpos1 = NEXT_INSN (newpos1);
2141 
2142   redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
2143   to_remove = single_succ (redirect_from);
2144 
2145   redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
2146   delete_basic_block (to_remove);
2147 
2148   update_forwarder_flag (redirect_from);
2149   if (redirect_to != src2)
2150     update_forwarder_flag (src2);
2151 
2152   return true;
2153 }
2154 
2155 /* Search the predecessors of BB for common insn sequences.  When found,
2156    share code between them by redirecting control flow.  Return true if
2157    any changes were made.  */
2158 
2159 static bool
2160 try_crossjump_bb (int mode, basic_block bb)
2161 {
2162   edge e, e2, fallthru;
2163   bool changed;
2164   unsigned max, ix, ix2;
2165 
2166   /* Nothing to do if there are not at least two incoming edges.  */
2167   if (EDGE_COUNT (bb->preds) < 2)
2168     return false;
2169 
2170   /* Don't crossjump if this block ends in a computed jump,
2171      unless we are optimizing for size.  */
2172   if (optimize_bb_for_size_p (bb)
2173       && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
2174       && computed_jump_p (BB_END (bb)))
2175     return false;
2176 
2177   /* If we are partitioning hot/cold basic blocks, we don't want to
2178      mess up unconditional or indirect jumps that cross between hot
2179      and cold sections.
2180 
2181      Basic block partitioning may result in some jumps that appear to
2182      be optimizable (or blocks that appear to be mergeable), but which really
2183      must be left untouched (they are required to make it safely across
2184      partition boundaries).  See the comments at the top of
2185      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
2186 
2187   if (BB_PARTITION (EDGE_PRED (bb, 0)->src)
2188       != BB_PARTITION (EDGE_PRED (bb, 1)->src)
2189       || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
2190     return false;
2191 
2192   /* It is always cheapest to redirect a block that ends in a branch to
2193      a block that falls through into BB, as that adds no branches to the
2194      program.  We'll try that combination first.  */
2195   fallthru = NULL;
2196   max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
2197 
2198   if (EDGE_COUNT (bb->preds) > max)
2199     return false;
2200 
2201   fallthru = find_fallthru_edge (bb->preds);
2202 
2203   changed = false;
2204   for (ix = 0; ix < EDGE_COUNT (bb->preds);)
2205     {
2206       e = EDGE_PRED (bb, ix);
2207       ix++;
2208 
2209       /* As noted above, first try with the fallthru predecessor (or, a
2210 	 fallthru predecessor if we are in cfglayout mode).  */
2211       if (fallthru)
2212 	{
2213 	  /* Don't combine the fallthru edge into anything else.
2214 	     If there is a match, we'll do it the other way around.  */
2215 	  if (e == fallthru)
2216 	    continue;
2217 	  /* If nothing changed since the last attempt, there is nothing
2218 	     we can do.  */
2219 	  if (!first_pass
2220 	      && !((e->src->flags & BB_MODIFIED)
2221 		   || (fallthru->src->flags & BB_MODIFIED)))
2222 	    continue;
2223 
2224 	  if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))
2225 	    {
2226 	      changed = true;
2227 	      ix = 0;
2228 	      continue;
2229 	    }
2230 	}
2231 
2232       /* Non-obvious work limiting check: Recognize that we're going
2233 	 to call try_crossjump_bb on every basic block.  So if we have
2234 	 two blocks with lots of outgoing edges (a switch) and they
2235 	 share lots of common destinations, then we would do the
2236 	 cross-jump check once for each common destination.
2237 
2238 	 Now, if the blocks actually are cross-jump candidates, then
2239 	 all of their destinations will be shared.  Which means that
2240 	 we only need check them for cross-jump candidacy once.  We
2241 	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
2242 	 choosing to do the check from the block for which the edge
2243 	 in question is the first successor of A.  */
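      /* As a hypothetical illustration: if blocks A and B each end in a
	 switch branching to the shared destinations D1, D2 and D3, the
	 pair (A, B) is considered only while scanning the destination
	 reached by EDGE_SUCC (A, 0), rather than once per shared
	 destination.  */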
2244       if (EDGE_SUCC (e->src, 0) != e)
2245 	continue;
2246 
2247       for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
2248 	{
2249 	  e2 = EDGE_PRED (bb, ix2);
2250 
2251 	  if (e2 == e)
2252 	    continue;
2253 
2254 	  /* We've already checked the fallthru edge above.  */
2255 	  if (e2 == fallthru)
2256 	    continue;
2257 
2258 	  /* The "first successor" check above only prevents multiple
2259 	     checks of crossjump(A,B).  In order to prevent redundant
2260 	     checks of crossjump(B,A), require that A be the block
2261 	     with the lowest index.  */
2262 	  if (e->src->index > e2->src->index)
2263 	    continue;
2264 
2265 	  /* If nothing changed since the last attempt, there is nothing
2266 	     we can do.  */
2267 	  if (!first_pass
2268 	      && !((e->src->flags & BB_MODIFIED)
2269 		   || (e2->src->flags & BB_MODIFIED)))
2270 	    continue;
2271 
2272 	  /* Both e and e2 are not fallthru edges, so we can crossjump in either
2273 	     direction.  */
2274 	  if (try_crossjump_to_edge (mode, e, e2, dir_both))
2275 	    {
2276 	      changed = true;
2277 	      ix = 0;
2278 	      break;
2279 	    }
2280 	}
2281     }
2282 
2283   if (changed)
2284     crossjumps_occured = true;
2285 
2286   return changed;
2287 }
2288 
2289 /* Search the successors of BB for common insn sequences.  When found,
2290    share code between them by moving it across the basic block
2291    boundary.  Return true if any changes were made.  */
2292 
2293 static bool
2294 try_head_merge_bb (basic_block bb)
2295 {
2296   basic_block final_dest_bb = NULL;
2297   int max_match = INT_MAX;
2298   edge e0;
2299   rtx_insn **headptr, **currptr, **nextptr;
2300   bool changed, moveall;
2301   unsigned ix;
2302   rtx_insn *e0_last_head;
2303   rtx cond;
2304   rtx_insn *move_before;
2305   unsigned nedges = EDGE_COUNT (bb->succs);
2306   rtx_insn *jump = BB_END (bb);
2307   regset live, live_union;
2308 
2309   /* Nothing to do if there are not at least two outgoing edges.  */
2310   if (nedges < 2)
2311     return false;
2312 
2313   /* Don't crossjump if this block ends in a computed jump,
2314      unless we are optimizing for size.  */
2315   if (optimize_bb_for_size_p (bb)
2316       && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
2317       && computed_jump_p (BB_END (bb)))
2318     return false;
2319 
2320   cond = get_condition (jump, &move_before, true, false);
2321   if (cond == NULL_RTX)
2322     {
2323       if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2324 	move_before = prev_nonnote_nondebug_insn (jump);
2325       else
2326 	move_before = jump;
2327     }
2328 
2329   for (ix = 0; ix < nedges; ix++)
2330     if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
2331       return false;
2332 
2333   for (ix = 0; ix < nedges; ix++)
2334     {
2335       edge e = EDGE_SUCC (bb, ix);
2336       basic_block other_bb = e->dest;
2337 
2338       if (df_get_bb_dirty (other_bb))
2339 	{
2340 	  block_was_dirty = true;
2341 	  return false;
2342 	}
2343 
2344       if (e->flags & EDGE_ABNORMAL)
2345 	return false;
2346 
2347       /* Normally, all destination blocks must only be reachable from this
2348 	 block, i.e. they must have one incoming edge.
2349 
2350 	 There is one special case we can handle, that of multiple consecutive
2351 	 jumps where the first jumps to one of the targets of the second jump.
2352 	 This happens frequently in switch statements for default labels.
2353 	 The structure is as follows:
2354 	 FINAL_DEST_BB
2355 	 ....
2356 	 if (cond) jump A;
2357 	 fall through
2358 	 BB
2359 	 jump with targets A, B, C, D...
2360 	 A
2361 	 has two incoming edges, from FINAL_DEST_BB and BB
2362 
2363 	 In this case, we can try to move the insns through BB and into
2364 	 FINAL_DEST_BB.  */
2365       if (EDGE_COUNT (other_bb->preds) != 1)
2366 	{
2367 	  edge incoming_edge, incoming_bb_other_edge;
2368 	  edge_iterator ei;
2369 
2370 	  if (final_dest_bb != NULL
2371 	      || EDGE_COUNT (other_bb->preds) != 2)
2372 	    return false;
2373 
2374 	  /* We must be able to move the insns across the whole block.  */
2375 	  move_before = BB_HEAD (bb);
2376 	  while (!NONDEBUG_INSN_P (move_before))
2377 	    move_before = NEXT_INSN (move_before);
2378 
2379 	  if (EDGE_COUNT (bb->preds) != 1)
2380 	    return false;
2381 	  incoming_edge = EDGE_PRED (bb, 0);
2382 	  final_dest_bb = incoming_edge->src;
2383 	  if (EDGE_COUNT (final_dest_bb->succs) != 2)
2384 	    return false;
2385 	  FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
2386 	    if (incoming_bb_other_edge != incoming_edge)
2387 	      break;
2388 	  if (incoming_bb_other_edge->dest != other_bb)
2389 	    return false;
2390 	}
2391     }
2392 
2393   e0 = EDGE_SUCC (bb, 0);
2394   e0_last_head = NULL;
2395   changed = false;
2396 
2397   for (ix = 1; ix < nedges; ix++)
2398     {
2399       edge e = EDGE_SUCC (bb, ix);
2400       rtx_insn *e0_last, *e_last;
2401       int nmatch;
2402 
2403       nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
2404 						 &e0_last, &e_last, 0);
2405       if (nmatch == 0)
2406 	return false;
2407 
2408       if (nmatch < max_match)
2409 	{
2410 	  max_match = nmatch;
2411 	  e0_last_head = e0_last;
2412 	}
2413     }
2414 
2415   /* If we matched an entire block, we probably have to avoid moving the
2416      last insn.  */
2417   if (max_match > 0
2418       && e0_last_head == BB_END (e0->dest)
2419       && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
2420 	  || control_flow_insn_p (e0_last_head)))
2421     {
2422       max_match--;
2423       if (max_match == 0)
2424 	return false;
2425       do
2426 	e0_last_head = prev_real_insn (e0_last_head);
2427       while (DEBUG_INSN_P (e0_last_head));
2428     }
2429 
2430   if (max_match == 0)
2431     return false;
2432 
2433   /* We must find a union of the live registers at each of the end points.  */
2434   live = BITMAP_ALLOC (NULL);
2435   live_union = BITMAP_ALLOC (NULL);
2436 
2437   currptr = XNEWVEC (rtx_insn *, nedges);
2438   headptr = XNEWVEC (rtx_insn *, nedges);
2439   nextptr = XNEWVEC (rtx_insn *, nedges);
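  /* For each successor edge IX: HEADPTR[IX] is the first insn of the
     matched head sequence in that successor, CURRPTR[IX] tracks the insn
     at the end of the range currently being merged, and NEXTPTR[IX]
     remembers the insn following a partially merged range so a later
     iteration can retry from there.  */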
2440 
2441   for (ix = 0; ix < nedges; ix++)
2442     {
2443       int j;
2444       basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
2445       rtx_insn *head = BB_HEAD (merge_bb);
2446 
2447       while (!NONDEBUG_INSN_P (head))
2448 	head = NEXT_INSN (head);
2449       headptr[ix] = head;
2450       currptr[ix] = head;
2451 
2452       /* Compute the end point and live information.  */
2453       for (j = 1; j < max_match; j++)
2454 	do
2455 	  head = NEXT_INSN (head);
2456 	while (!NONDEBUG_INSN_P (head));
2457       simulate_backwards_to_point (merge_bb, live, head);
2458       IOR_REG_SET (live_union, live);
2459     }
2460 
2461   /* If we're moving across two blocks, verify the validity of the
2462      first move, then adjust the target and let the loop below deal
2463      with the final move.  */
2464   if (final_dest_bb != NULL)
2465     {
2466       rtx_insn *move_upto;
2467 
2468       moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
2469 				       jump, e0->dest, live_union,
2470 				       NULL, &move_upto);
2471       if (!moveall)
2472 	{
2473 	  if (move_upto == NULL_RTX)
2474 	    goto out;
2475 
2476 	  while (e0_last_head != move_upto)
2477 	    {
2478 	      df_simulate_one_insn_backwards (e0->dest, e0_last_head,
2479 					      live_union);
2480 	      e0_last_head = PREV_INSN (e0_last_head);
2481 	    }
2482 	}
2483       if (e0_last_head == NULL_RTX)
2484 	goto out;
2485 
2486       jump = BB_END (final_dest_bb);
2487       cond = get_condition (jump, &move_before, true, false);
2488       if (cond == NULL_RTX)
2489 	{
2490 	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2491 	    move_before = prev_nonnote_nondebug_insn (jump);
2492 	  else
2493 	    move_before = jump;
2494 	}
2495     }
2496 
2497   do
2498     {
2499       rtx_insn *move_upto;
2500       moveall = can_move_insns_across (currptr[0], e0_last_head,
2501 				       move_before, jump, e0->dest, live_union,
2502 				       NULL, &move_upto);
2503       if (!moveall && move_upto == NULL_RTX)
2504 	{
2505 	  if (jump == move_before)
2506 	    break;
2507 
2508 	  /* Try again, using a different insertion point.  */
2509 	  move_before = jump;
2510 
2511 	  /* Don't try moving before a cc0 user, as that may invalidate
2512 	     the cc0.  */
2513 	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2514 	    break;
2515 
2516 	  continue;
2517 	}
2518 
2519       if (final_dest_bb && !moveall)
2520 	/* We haven't checked whether a partial move would be OK for the first
2521 	   move, so we have to fail this case.  */
2522 	break;
2523 
2524       changed = true;
2525       for (;;)
2526 	{
2527 	  if (currptr[0] == move_upto)
2528 	    break;
2529 	  for (ix = 0; ix < nedges; ix++)
2530 	    {
2531 	      rtx_insn *curr = currptr[ix];
2532 	      do
2533 		curr = NEXT_INSN (curr);
2534 	      while (!NONDEBUG_INSN_P (curr));
2535 	      currptr[ix] = curr;
2536 	    }
2537 	}
2538 
2539       /* If we can't currently move all of the identical insns, remember
2540 	 each insn after the range that we'll merge.  */
2541       if (!moveall)
2542 	for (ix = 0; ix < nedges; ix++)
2543 	  {
2544 	    rtx_insn *curr = currptr[ix];
2545 	    do
2546 	      curr = NEXT_INSN (curr);
2547 	    while (!NONDEBUG_INSN_P (curr));
2548 	    nextptr[ix] = curr;
2549 	  }
2550 
2551       reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
2552       df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
2553       if (final_dest_bb != NULL)
2554 	df_set_bb_dirty (final_dest_bb);
2555       df_set_bb_dirty (bb);
2556       for (ix = 1; ix < nedges; ix++)
2557 	{
2558 	  df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
2559 	  delete_insn_chain (headptr[ix], currptr[ix], false);
2560 	}
2561       if (!moveall)
2562 	{
2563 	  if (jump == move_before)
2564 	    break;
2565 
2566 	  /* For the unmerged insns, try a different insertion point.  */
2567 	  move_before = jump;
2568 
2569 	  /* Don't try moving before a cc0 user, as that may invalidate
2570 	     the cc0.  */
2571 	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2572 	    break;
2573 
2574 	  for (ix = 0; ix < nedges; ix++)
2575 	    currptr[ix] = headptr[ix] = nextptr[ix];
2576 	}
2577     }
2578   while (!moveall);
2579 
2580  out:
2581   free (currptr);
2582   free (headptr);
2583   free (nextptr);
2584 
2585   crossjumps_occured |= changed;
2586 
2587   return changed;
2588 }
2589 
2590 /* Return true if BB contains just a bb note, or a bb note followed
2591    only by DEBUG_INSNs.  */
2592 
2593 static bool
2594 trivially_empty_bb_p (basic_block bb)
2595 {
2596   rtx_insn *insn = BB_END (bb);
2597 
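  /* Walk backwards from the end of BB; we reach BB_HEAD (the basic block
     note) only if every insn in between is a DEBUG_INSN.  */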
2598   while (1)
2599     {
2600       if (insn == BB_HEAD (bb))
2601 	return true;
2602       if (!DEBUG_INSN_P (insn))
2603 	return false;
2604       insn = PREV_INSN (insn);
2605     }
2606 }
2607 
2608 /* Do simple CFG optimizations - basic block merging, simplification of
2609    jump instructions, etc.  Return nonzero if changes were made.  */
2610 
2611 static bool
2612 try_optimize_cfg (int mode)
2613 {
2614   bool changed_overall = false;
2615   bool changed;
2616   int iterations = 0;
2617   basic_block bb, b, next;
2618 
2619   if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
2620     clear_bb_flags ();
2621 
2622   crossjumps_occured = false;
2623 
2624   FOR_EACH_BB_FN (bb, cfun)
2625     update_forwarder_flag (bb);
2626 
2627   if (! targetm.cannot_modify_jumps_p ())
2628     {
2629       first_pass = true;
2630       /* Attempt to merge blocks as made possible by edge removal.  If
2631 	 a block has only one successor, and the successor has only
2632 	 one predecessor, they may be combined.  */
2633       do
2634 	{
2635 	  block_was_dirty = false;
2636 	  changed = false;
2637 	  iterations++;
2638 
2639 	  if (dump_file)
2640 	    fprintf (dump_file,
2641 		     "\n\ntry_optimize_cfg iteration %i\n\n",
2642 		     iterations);
2643 
2644 	  for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
2645 	       != EXIT_BLOCK_PTR_FOR_FN (cfun);)
2646 	    {
2647 	      basic_block c;
2648 	      edge s;
2649 	      bool changed_here = false;
2650 
2651 	      /* Delete trivially dead basic blocks.  This is either
2652 		 blocks with no predecessors, or empty blocks with no
2653 		 successors.  However if the empty block with no
2654 		 successors is the successor of the ENTRY_BLOCK, it is
2655 		 kept.  This ensures that the ENTRY_BLOCK will have a
2656 		 successor which is a precondition for many RTL
2657 		 passes.  Empty blocks may result from expanding
2658 		 __builtin_unreachable ().  */
2659 	      if (EDGE_COUNT (b->preds) == 0
2660 		  || (EDGE_COUNT (b->succs) == 0
2661 		      && trivially_empty_bb_p (b)
2662 		      && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest
2663 		      != b))
2664 		{
2665 		  c = b->prev_bb;
2666 		  if (EDGE_COUNT (b->preds) > 0)
2667 		    {
2668 		      edge e;
2669 		      edge_iterator ei;
2670 
2671 		      if (current_ir_type () == IR_RTL_CFGLAYOUT)
2672 			{
2673 			  if (BB_FOOTER (b)
2674 			      && BARRIER_P (BB_FOOTER (b)))
2675 			    FOR_EACH_EDGE (e, ei, b->preds)
2676 			      if ((e->flags & EDGE_FALLTHRU)
2677 				  && BB_FOOTER (e->src) == NULL)
2678 				{
2679 				  if (BB_FOOTER (b))
2680 				    {
2681 				      BB_FOOTER (e->src) = BB_FOOTER (b);
2682 				      BB_FOOTER (b) = NULL;
2683 				    }
2684 				  else
2685 				    {
2686 				      start_sequence ();
2687 				      BB_FOOTER (e->src) = emit_barrier ();
2688 				      end_sequence ();
2689 				    }
2690 				}
2691 			}
2692 		      else
2693 			{
2694 			  rtx_insn *last = get_last_bb_insn (b);
2695 			  if (last && BARRIER_P (last))
2696 			    FOR_EACH_EDGE (e, ei, b->preds)
2697 			      if ((e->flags & EDGE_FALLTHRU))
2698 				emit_barrier_after (BB_END (e->src));
2699 			}
2700 		    }
2701 		  delete_basic_block (b);
2702 		  changed = true;
2703 		  /* Avoid trying to remove the ENTRY block.  */
2704 		  b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c);
2705 		  continue;
2706 		}
2707 
2708 	      /* Remove code labels no longer used.  */
2709 	      if (single_pred_p (b)
2710 		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2711 		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
2712 		  && LABEL_P (BB_HEAD (b))
2713 		  && !LABEL_PRESERVE_P (BB_HEAD (b))
2714 		  /* If the previous block ends with a branch to this
2715 		     block, we can't delete the label.  Normally this
2716 		     is a condjump that is yet to be simplified, but
2717 		     if CASE_DROPS_THRU, this can be a tablejump with
2718 		     some element going to the same place as the
2719 		     default (fallthru).  */
2720 		  && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun)
2721 		      || !JUMP_P (BB_END (single_pred (b)))
2722 		      || ! label_is_jump_target_p (BB_HEAD (b),
2723 						   BB_END (single_pred (b)))))
2724 		{
2725 		  delete_insn (BB_HEAD (b));
2726 		  if (dump_file)
2727 		    fprintf (dump_file, "Deleted label in block %i.\n",
2728 			     b->index);
2729 		}
2730 
2731 	      /* If we fall through an empty block, we can remove it.  */
2732 	      if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL))
2733 		  && single_pred_p (b)
2734 		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2735 		  && !LABEL_P (BB_HEAD (b))
2736 		  && FORWARDER_BLOCK_P (b)
2737 		  /* Note that forwarder_block_p true ensures that
2738 		     there is a successor for this block.  */
2739 		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
2740 		  && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
2741 		{
2742 		  if (dump_file)
2743 		    fprintf (dump_file,
2744 			     "Deleting fallthru block %i.\n",
2745 			     b->index);
2746 
2747 		  c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2748 		       ? b->next_bb : b->prev_bb);
2749 		  redirect_edge_succ_nodup (single_pred_edge (b),
2750 					    single_succ (b));
2751 		  delete_basic_block (b);
2752 		  changed = true;
2753 		  b = c;
2754 		  continue;
2755 		}
2756 
2757 	      /* Merge B with its single successor, if any.  */
2758 	      if (single_succ_p (b)
2759 		  && (s = single_succ_edge (b))
2760 		  && !(s->flags & EDGE_COMPLEX)
2761 		  && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun)
2762 		  && single_pred_p (c)
2763 		  && b != c)
2764 		{
2765 		  /* When not in cfg_layout mode use code aware of reordering
2766 		     INSNs.  This code possibly creates new basic blocks, so it
2767 		     does not fit the merge_blocks interface and is kept here in
2768 		     the hope that it will become useless once more of the
2769 		     compiler is transformed to use cfg_layout mode.  */
2770 
2771 		  if ((mode & CLEANUP_CFGLAYOUT)
2772 		      && can_merge_blocks_p (b, c))
2773 		    {
2774 		      merge_blocks (b, c);
2775 		      update_forwarder_flag (b);
2776 		      changed_here = true;
2777 		    }
2778 		  else if (!(mode & CLEANUP_CFGLAYOUT)
2779 			   /* If the jump insn has side effects,
2780 			      we can't kill the edge.  */
2781 			   && (!JUMP_P (BB_END (b))
2782 			       || (reload_completed
2783 				   ? simplejump_p (BB_END (b))
2784 				   : (onlyjump_p (BB_END (b))
2785 				      && !tablejump_p (BB_END (b),
2786 						       NULL, NULL))))
2787 			   && (next = merge_blocks_move (s, b, c, mode)))
2788 		      {
2789 			b = next;
2790 			changed_here = true;
2791 		      }
2792 		}
2793 
2794 	      /* Simplify branch over branch.  */
2795 	      if ((mode & CLEANUP_EXPENSIVE)
2796 		   && !(mode & CLEANUP_CFGLAYOUT)
2797 		   && try_simplify_condjump (b))
2798 		changed_here = true;
2799 
2800 	      /* If B has a single outgoing edge, but uses a
2801 		 non-trivial jump instruction without side-effects, we
2802 		 can either delete the jump entirely, or replace it
2803 		 with a simple unconditional jump.  */
2804 	      if (single_succ_p (b)
2805 		  && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
2806 		  && onlyjump_p (BB_END (b))
2807 		  && !CROSSING_JUMP_P (BB_END (b))
2808 		  && try_redirect_by_replacing_jump (single_succ_edge (b),
2809 						     single_succ (b),
2810 						     (mode & CLEANUP_CFGLAYOUT) != 0))
2811 		{
2812 		  update_forwarder_flag (b);
2813 		  changed_here = true;
2814 		}
2815 
2816 	      /* Simplify branch to branch.  */
2817 	      if (try_forward_edges (mode, b))
2818 		{
2819 		  update_forwarder_flag (b);
2820 		  changed_here = true;
2821 		}
2822 
2823 	      /* Look for shared code between blocks.  */
2824 	      if ((mode & CLEANUP_CROSSJUMP)
2825 		  && try_crossjump_bb (mode, b))
2826 		changed_here = true;
2827 
2828 	      if ((mode & CLEANUP_CROSSJUMP)
2829 		  /* This can lengthen register lifetimes.  Do it only after
2830 		     reload.  */
2831 		  && reload_completed
2832 		  && try_head_merge_bb (b))
2833 		changed_here = true;
2834 
2835 	      /* Don't get confused by the index shift caused by
2836 		 deleting blocks.  */
2837 	      if (!changed_here)
2838 		b = b->next_bb;
2839 	      else
2840 		changed = true;
2841 	    }
2842 
2843 	  if ((mode & CLEANUP_CROSSJUMP)
2844 	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
2845 	    changed = true;
2846 
2847 	  if (block_was_dirty)
2848 	    {
2849 	      /* This should only be set by head-merging.  */
2850 	      gcc_assert (mode & CLEANUP_CROSSJUMP);
2851 	      df_analyze ();
2852 	    }
2853 
2854 	  if (changed)
2855             {
2856               /* Edge forwarding in particular can cause hot blocks previously
2857                  reached by both hot and cold blocks to become dominated only
2858                  by cold blocks. This will cause the verification below to fail,
2859                  and lead to now-cold code in the hot section.  This is not easy
2860                  to detect and fix during edge forwarding, and in some cases
2861                  is only visible after newly unreachable blocks are deleted,
2862                  which will be done in fixup_partitions.  */
2863 	      fixup_partitions ();
2864 	      checking_verify_flow_info ();
2865             }
2866 
2867 	  changed_overall |= changed;
2868 	  first_pass = false;
2869 	}
2870       while (changed);
2871     }
2872 
2873   FOR_ALL_BB_FN (b, cfun)
2874     b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
2875 
2876   return changed_overall;
2877 }
2878 
2879 /* Delete all unreachable basic blocks.  */
2880 
2881 bool
2882 delete_unreachable_blocks (void)
2883 {
2884   bool changed = false;
2885   basic_block b, prev_bb;
2886 
2887   find_unreachable_blocks ();
2888 
2889   /* When we're in GIMPLE mode and there may be debug insns, we should
2890      delete blocks in reverse dominator order, so as to get a chance
2891      to substitute all released DEFs into debug stmts.  If we don't
2892      have dominators information, walking blocks backward gets us a
2893      better chance of retaining most debug information than
2894      otherwise.  */
2895   if (MAY_HAVE_DEBUG_INSNS && current_ir_type () == IR_GIMPLE
2896       && dom_info_available_p (CDI_DOMINATORS))
2897     {
2898       for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
2899 	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
2900 	{
2901 	  prev_bb = b->prev_bb;
2902 
2903 	  if (!(b->flags & BB_REACHABLE))
2904 	    {
2905 	      /* Speed up the removal of blocks that don't dominate
2906 		 others.  Walking backwards, this should be the common
2907 		 case.  */
2908 	      if (!first_dom_son (CDI_DOMINATORS, b))
2909 		delete_basic_block (b);
2910 	      else
2911 		{
2912 		  vec<basic_block> h
2913 		    = get_all_dominated_blocks (CDI_DOMINATORS, b);
2914 
2915 		  while (h.length ())
2916 		    {
2917 		      b = h.pop ();
2918 
2919 		      prev_bb = b->prev_bb;
2920 
2921 		      gcc_assert (!(b->flags & BB_REACHABLE));
2922 
2923 		      delete_basic_block (b);
2924 		    }
2925 
2926 		  h.release ();
2927 		}
2928 
2929 	      changed = true;
2930 	    }
2931 	}
2932     }
2933   else
2934     {
2935       for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
2936 	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
2937 	{
2938 	  prev_bb = b->prev_bb;
2939 
2940 	  if (!(b->flags & BB_REACHABLE))
2941 	    {
2942 	      delete_basic_block (b);
2943 	      changed = true;
2944 	    }
2945 	}
2946     }
2947 
2948   if (changed)
2949     tidy_fallthru_edges ();
2950   return changed;
2951 }
2952 
2953 /* Delete any jump tables never referenced.  We can't delete them at the
2954    time of removing the tablejump insn as they are referenced by the preceding
2955    insns computing the destination, so we delay deleting and garbage collect
2956    them once life information is computed.  */
2957 void
2958 delete_dead_jumptables (void)
2959 {
2960   basic_block bb;
2961 
2962   /* A dead jump table does not belong to any basic block.  Scan insns
2963      between two adjacent basic blocks.  */
2964   FOR_EACH_BB_FN (bb, cfun)
2965     {
2966       rtx_insn *insn, *next;
2967 
2968       for (insn = NEXT_INSN (BB_END (bb));
2969 	   insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
2970 	   insn = next)
2971 	{
2972 	  next = NEXT_INSN (insn);
2973 	  if (LABEL_P (insn)
2974 	      && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
2975 	      && JUMP_TABLE_DATA_P (next))
2976 	    {
2977 	      rtx_insn *label = insn, *jump = next;
2978 
2979 	      if (dump_file)
2980 		fprintf (dump_file, "Dead jumptable %i removed\n",
2981 			 INSN_UID (insn));
2982 
2983 	      next = NEXT_INSN (next);
2984 	      delete_insn (jump);
2985 	      delete_insn (label);
2986 	    }
2987 	}
2988     }
2989 }
2990 
2991 
2992 /* Tidy the CFG by deleting unreachable code and whatnot.  */
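/* For concrete examples of callers, see pass_jump and pass_jump2 at the end
   of this file: pass_jump calls cleanup_cfg with CLEANUP_EXPENSIVE when
   optimizing and adds CLEANUP_THREADING when -fthread-jumps is enabled,
   while pass_jump2 passes CLEANUP_CROSSJUMP when -fcrossjumping is
   enabled.  */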
2993 
2994 bool
2995 cleanup_cfg (int mode)
2996 {
2997   bool changed = false;
2998 
2999   /* Set the cfglayout mode flag here.  We could update all the callers
3000      but that is just inconvenient, especially given that we eventually
3001      want to have cfglayout mode as the default.  */
3002   if (current_ir_type () == IR_RTL_CFGLAYOUT)
3003     mode |= CLEANUP_CFGLAYOUT;
3004 
3005   timevar_push (TV_CLEANUP_CFG);
3006   if (delete_unreachable_blocks ())
3007     {
3008       changed = true;
3009       /* We've possibly created trivially dead code.  Clean it up right
3010 	 now to introduce more opportunities for try_optimize_cfg.  */
3011       if (!(mode & (CLEANUP_NO_INSN_DEL))
3012 	  && !reload_completed)
3013 	delete_trivially_dead_insns (get_insns (), max_reg_num ());
3014     }
3015 
3016   compact_blocks ();
3017 
3018   /* To tail-merge blocks ending in the same noreturn function (e.g.
3019      a call to abort) we have to insert fake edges to exit.  Do this
3020      here once.  The fake edges do not interfere with any other CFG
3021      cleanups.  */
3022   if (mode & CLEANUP_CROSSJUMP)
3023     add_noreturn_fake_exit_edges ();
3024 
3025   if (!dbg_cnt (cfg_cleanup))
3026     return changed;
3027 
3028   while (try_optimize_cfg (mode))
3029     {
3030       delete_unreachable_blocks (), changed = true;
3031       if (!(mode & CLEANUP_NO_INSN_DEL))
3032 	{
3033 	  /* Try to remove some trivially dead insns when doing an expensive
3034 	     cleanup.  But delete_trivially_dead_insns doesn't work after
3035 	     reload (it only handles pseudos) and run_fast_dce is too costly
3036 	     to run in every iteration.
3037 
3038 	     For effective cross jumping, we really want to run a fast DCE to
3039 	     clean up any dead conditions, or they get in the way of performing
3040 	     useful tail merges.
3041 
3042 	     Other transformations in cleanup_cfg are not so sensitive to dead
3043 	     code, so delete_trivially_dead_insns or even doing nothing at all
3044 	     is good enough.  */
3045 	  if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
3046 	      && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
3047 	    break;
3048 	  if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occured)
3049 	    run_fast_dce ();
3050 	}
3051       else
3052 	break;
3053     }
3054 
3055   if (mode & CLEANUP_CROSSJUMP)
3056     remove_fake_exit_edges ();
3057 
3058   /* Don't call delete_dead_jumptables in cfglayout mode, because
3059      that function assumes that jump tables are in the insns stream.
3060      But we also don't _have_ to delete dead jumptables in cfglayout
3061      mode because we shouldn't even be looking at things that are
3062      not in a basic block.  Dead jumptables are cleaned up when
3063      going out of cfglayout mode.  */
3064   if (!(mode & CLEANUP_CFGLAYOUT))
3065     delete_dead_jumptables ();
3066 
3067   /* ???  We probably do this way too often.  */
3068   if (current_loops
3069       && (changed
3070 	  || (mode & CLEANUP_CFG_CHANGED)))
3071     {
3072       timevar_push (TV_REPAIR_LOOPS);
3073       /* The above doesn't preserve dominance info if available.  */
3074       gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
3075       calculate_dominance_info (CDI_DOMINATORS);
3076       fix_loop_structure (NULL);
3077       free_dominance_info (CDI_DOMINATORS);
3078       timevar_pop (TV_REPAIR_LOOPS);
3079     }
3080 
3081   timevar_pop (TV_CLEANUP_CFG);
3082 
3083   return changed;
3084 }
3085 
3086 namespace {
3087 
3088 const pass_data pass_data_jump =
3089 {
3090   RTL_PASS, /* type */
3091   "jump", /* name */
3092   OPTGROUP_NONE, /* optinfo_flags */
3093   TV_JUMP, /* tv_id */
3094   0, /* properties_required */
3095   0, /* properties_provided */
3096   0, /* properties_destroyed */
3097   0, /* todo_flags_start */
3098   0, /* todo_flags_finish */
3099 };
3100 
3101 class pass_jump : public rtl_opt_pass
3102 {
3103 public:
3104   pass_jump (gcc::context *ctxt)
3105     : rtl_opt_pass (pass_data_jump, ctxt)
3106   {}
3107 
3108   /* opt_pass methods: */
3109   virtual unsigned int execute (function *);
3110 
3111 }; // class pass_jump
3112 
3113 unsigned int
3114 pass_jump::execute (function *)
3115 {
3116   delete_trivially_dead_insns (get_insns (), max_reg_num ());
3117   if (dump_file)
3118     dump_flow_info (dump_file, dump_flags);
3119   cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
3120 	       | (flag_thread_jumps ? CLEANUP_THREADING : 0));
3121   return 0;
3122 }
3123 
3124 } // anon namespace
3125 
3126 rtl_opt_pass *
3127 make_pass_jump (gcc::context *ctxt)
3128 {
3129   return new pass_jump (ctxt);
3130 }
3131 
3132 namespace {
3133 
3134 const pass_data pass_data_jump2 =
3135 {
3136   RTL_PASS, /* type */
3137   "jump2", /* name */
3138   OPTGROUP_NONE, /* optinfo_flags */
3139   TV_JUMP, /* tv_id */
3140   0, /* properties_required */
3141   0, /* properties_provided */
3142   0, /* properties_destroyed */
3143   0, /* todo_flags_start */
3144   0, /* todo_flags_finish */
3145 };
3146 
3147 class pass_jump2 : public rtl_opt_pass
3148 {
3149 public:
3150   pass_jump2 (gcc::context *ctxt)
3151     : rtl_opt_pass (pass_data_jump2, ctxt)
3152   {}
3153 
3154   /* opt_pass methods: */
3155   virtual unsigned int execute (function *)
3156     {
3157       cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
3158       return 0;
3159     }
3160 
3161 }; // class pass_jump2
3162 
3163 } // anon namespace
3164 
3165 rtl_opt_pass *
3166 make_pass_jump2 (gcc::context *ctxt)
3167 {
3168   return new pass_jump2 (ctxt);
3169 }
3170