/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

/* This file contains the optimizer of the control flow.  The main entry
   point is cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to its
     successor.  Simplification of the branch instruction is performed by
     the underlying infrastructure so the branch can be converted to a
     simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "timevar.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "toplev.h"
#include "cselib.h"
#include "params.h"
#include "tm_p.h"
#include "target.h"
#include "cfglayout.h"
#include "emit-rtl.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "expr.h"

#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)

/* Set to true when we are running the first pass of the try_optimize_cfg
   loop.  */
static bool first_pass;
static bool try_crossjump_to_edge (int, edge, edge);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
static bool old_insns_match_p (int, rtx, rtx);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (int, edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static int mentions_nonequal_regs (rtx *, void *);
static void merge_memattrs (rtx, rtx);

/* Set flags for newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
}

/* Recompute forwarder flag after block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
  else
    bb->flags &= ~BB_FORWARDER_BLOCK;
}

/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */
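
/* For instance, a shape like the following (an illustrative sketch; label
   names are made up):

	if (test) goto L1;		if (! test) goto L2;
	goto L2;		=>	L1: ...
      L1: ...

   The "goto L2" block must be a forwarder with a single predecessor; after
   the condition is inverted that block is deleted and the conditional
   branch falls through into L1.  */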

static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}

/* Attempt to prove that the operation is a NOOP using CSElib, or mark the
   effect on a register.  Used by jump threading.  */
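
/* For example (an illustrative sketch): a (set (reg A) (reg B)) where
   cselib has already proved that A and B hold the same value is a NOOP and
   leaves NONEQUAL untouched; a set of a register to anything else marks
   that register (and all hard registers it overlaps) as nonequal; and a set
   whose destination is neither a register nor the pc, such as a store to
   memory, makes the analysis fail.  */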

static bool
mark_effect (rtx exp, regset nonequal)
{
  int regno;
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
	 value is dead so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
	{
	  dest = XEXP (exp, 0);
	  regno = REGNO (dest);
	  CLEAR_REGNO_REG_SET (nonequal, regno);
	  if (regno < FIRST_PSEUDO_REGISTER)
	    {
	      int n = hard_regno_nregs[regno][GET_MODE (dest)];
	      while (--n > 0)
		CLEAR_REGNO_REG_SET (nonequal, regno + n);
	    }
	}
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
	return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
	return false;
      if (!REG_P (dest))
	return true;
      regno = REGNO (dest);
      SET_REGNO_REG_SET (nonequal, regno);
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = hard_regno_nregs[regno][GET_MODE (dest)];
	  while (--n > 0)
	    SET_REGNO_REG_SET (nonequal, regno + n);
	}
      return false;

    default:
      return false;
    }
}

/* Return nonzero if X is a register set in regset DATA.
   Called via for_each_rtx.  */
static int
mentions_nonequal_regs (rtx *x, void *data)
{
  regset nonequal = (regset) data;
  if (REG_P (*x))
    {
      int regno;

      regno = REGNO (*x);
      if (REGNO_REG_SET_P (nonequal, regno))
	return 1;
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = hard_regno_nregs[regno][GET_MODE (*x)];
	  while (--n > 0)
	    if (REGNO_REG_SET_P (nonequal, regno + n))
	      return 1;
	}
    }
  return 0;
}
/* Attempt to prove that the basic block B will have no side effects and
   always continues in the same edge if reached via E.  Return the edge
   if it exists, NULL otherwise.  */
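
/* For example (an illustrative sketch): if E->SRC ends in
   "if (x == 0) goto ..." and B ends in "if (x == 0) goto L", then on the
   edge E the outcome of B's comparison is already determined, so provided
   B is otherwise a NOOP, E can be redirected past B straight to the
   successor B would have taken.  */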

static edge
thread_jump (int mode, edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (b->flags & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we can't
     safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
	b->flags |= BB_NONTHREADABLE_BLOCK;
	return NULL;
      }

  cselib_init (false);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == PARALLEL)
	    {
	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
	    }
	  else
	    failed |= mark_effect (pat, nonequal);
	}

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not equal to the
     former block.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;

  /* In case liveness information is available, we need to prove equivalence
     only of the live values.  */
  if (mode & CLEANUP_UPDATE_LIFE)
    AND_REG_SET (nonequal, b->il.rtl->global_live_at_end);

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}

/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */
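
/* For instance (a rough sketch): if an edge enters a block that does
   nothing but jump to a second block, which in turn only jumps to a third,
   the edge is redirected straight to the final destination and the
   intermediate forwarder blocks become dead.  With CLEANUP_THREADING,
   thread_jump above is also used to skip blocks whose branch outcome is
   already determined on the incoming edge.  */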

static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
    return false;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass | (b->flags & BB_DIRTY);

      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we can succeed in forwarding the
	 fallthru edge to the same place as the branch edge of a conditional
	 branch, turning the conditional branch into an unconditional one.  */
      if (e->flags & EDGE_COMPLEX)
	{
	  ei_next (&ei);
	  continue;
	}

      target = first = e->dest;
      counter = NUM_FIXED_BLOCKS;

      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
	 up jumps that cross between hot/cold sections.

	 Basic block partitioning may result in some jumps that appear
	 to be optimizable (or blocks that appear to be mergeable), but which
	 really must be left untouched (they are required to make it safely
	 across partition boundaries).  See the comments at the top of
	 bb-reorder.c:partition_hot_cold_basic_blocks for complete
	 details.  */

      if (first != EXIT_BLOCK_PTR
	  && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
	return false;

      while (counter < n_basic_blocks)
	{
	  basic_block new_target = NULL;
	  bool new_target_threaded = false;
	  may_thread |= target->flags & BB_DIRTY;

	  if (FORWARDER_BLOCK_P (target)
	      && !(single_succ_edge (target)->flags & EDGE_CROSSING)
	      && single_succ (target) != EXIT_BLOCK_PTR)
	    {
	      /* Bypass trivial infinite loops.  */
	      new_target = single_succ (target);
	      if (target == new_target)
		counter = n_basic_blocks;
	    }

	  /* Allow threading over at most one edge at a time to simplify
	     updating of probabilities.  */
	  else if ((mode & CLEANUP_THREADING) && may_thread)
	    {
	      edge t = thread_jump (mode, e, target);
	      if (t)
		{
		  if (!threaded_edges)
		    threaded_edges = XNEWVEC (edge, n_basic_blocks);
		  else
		    {
		      int i;

		      /* Detect an infinite loop across blocks not
			 including the start block.  */
		      for (i = 0; i < nthreaded_edges; ++i)
			if (threaded_edges[i] == t)
			  break;
		      if (i < nthreaded_edges)
			{
			  counter = n_basic_blocks;
			  break;
			}
		    }

		  /* Detect an infinite loop across the start block.  */
		  if (t->dest == b)
		    break;

		  gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
		  threaded_edges[nthreaded_edges++] = t;

		  new_target = t->dest;
		  new_target_threaded = true;
		}
	    }

	  if (!new_target)
	    break;

	  counter++;
	  target = new_target;
	  threaded |= new_target_threaded;
	}

      if (counter >= n_basic_blocks)
	{
	  if (dump_file)
	    fprintf (dump_file, "Infinite loop in BB %i.\n",
		     target->index);
	}
      else if (target == first)
	; /* We didn't do anything.  */
      else
	{
	  /* Save the values now, as the edge may get removed.  */
	  gcov_type edge_count = e->count;
	  int edge_probability = e->probability;
	  int edge_frequency;
	  int n = 0;

	  /* Don't force if target is exit block.  */
	  if (threaded && target != EXIT_BLOCK_PTR)
	    {
	      notice_new_block (redirect_edge_and_branch_force (e, target));
	      if (dump_file)
		fprintf (dump_file, "Conditionals threaded.\n");
	    }
	  else if (!redirect_edge_and_branch (e, target))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Forwarding edge %i->%i to %i failed.\n",
			 b->index, e->dest->index, target->index);
	      ei_next (&ei);
	      continue;
	    }

	  /* We successfully forwarded the edge.  Now update profile
	     data: for each edge we traversed in the chain, remove
	     the original edge's execution count.  */
	  edge_frequency = ((edge_probability * b->frequency
			     + REG_BR_PROB_BASE / 2)
			    / REG_BR_PROB_BASE);

	  if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
	    b->flags |= BB_FORWARDER_BLOCK;

	  do
	    {
	      edge t;

	      if (!single_succ_p (first))
		{
		  gcc_assert (n < nthreaded_edges);
		  t = threaded_edges [n++];
		  gcc_assert (t->src == first);
		  update_bb_profile_for_threading (first, edge_frequency,
						   edge_count, t);
		  update_br_prob_note (first);
		}
	      else
		{
		  first->count -= edge_count;
		  if (first->count < 0)
		    first->count = 0;
		  first->frequency -= edge_frequency;
		  if (first->frequency < 0)
		    first->frequency = 0;
		  /* It is possible that as the result of
		     threading we've removed the edge as it is
		     threaded to the fallthru edge.  Avoid
		     getting out of sync.  */
		  if (n < nthreaded_edges
		      && first == threaded_edges [n]->src)
		    n++;
		  t = single_succ_edge (first);
		}

	      t->count -= edge_count;
	      if (t->count < 0)
		t->count = 0;
	      first = t->dest;
	    }
	  while (first != target);

	  changed = true;
	  continue;
	}
      ei_next (&ei);
    }

  if (threaded_edges)
    free (threaded_edges);
  return changed;
}


/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */
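
/* Roughly: the barrier after A's jump is deleted, A's insns are spliced
   into the chain immediately before B's, and merge_blocks then fuses the
   two now-contiguous blocks into one.  */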

static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx barrier;
  bool only_notes;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  only_notes = squeeze_notes (&BB_HEAD (a), &BB_END (a));
  gcc_assert (!only_notes);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  a->flags |= BB_DIRTY;

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
	     a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx barrier, real_b_end;
  rtx label, table;
  bool only_notes;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B, temporarily add the jump
     table to block B so that it will also be moved to the correct
     location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  only_notes = squeeze_notes (&BB_HEAD (b), &BB_END (b));
  gcc_assert (!only_notes);

  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
	     b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves the basic block away or introduces another optimization
   possibility, return the basic block just before B so that cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return the basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */

static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (b) != BB_PARTITION (c))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;
      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
	fprintf (dump_file, "Merged %d and %d without moving.\n",
		 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;
      edge_iterator ei;

      /* Avoid overactive code motion, as the forwarder blocks should be
	 eliminated by edge redirection instead.  One exception might have
	 been if B is a forwarder block and C has no fallthru edge, but
	 that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
	return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
	 and loop notes.  This is done by squeezing out all the notes
	 and leaving them there to lie.  Not ideal, but functional.  */

      FOR_EACH_EDGE (tmp_edge, ei, c->succs)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      c_has_outgoing_fallthru = (tmp_edge != NULL);

      FOR_EACH_EDGE (tmp_edge, ei, b->preds)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
	next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
	 not have an outgoing fallthru, then it can be moved
	 immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
	{
	  merge_blocks_move_successor_nojumps (b, c);
	  return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
	}

      /* If B does not have an incoming fallthru, then it can be moved
	 immediately before C without introducing or modifying jumps.
	 C cannot be the first block, so we do not have to worry about
	 accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
	{
	  basic_block bb;

	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
	    return NULL;
	  bb = force_nonfallthru (b_fallthru_edge);
	  if (bb)
	    notice_new_block (bb);
	}

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
    }

  return NULL;
}


/* Remove the memory attributes of the MEM expressions X and Y
   if they are not equal.  */
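
/* Roughly: differing alias sets collapse to alias set 0, a differing
   MEM_EXPR or MEM_OFFSET is cleared on both sides, the recorded size
   becomes the larger of the two (or unknown if either is unknown), and the
   alignment becomes the smaller of the two.  The walk recurses into
   matching subexpressions.  */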

void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
    {
      if (! MEM_ATTRS (x))
	MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
	MEM_ATTRS (x) = 0;
      else
	{
	  rtx mem_size;

	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
	    {
	      set_mem_alias_set (x, 0);
	      set_mem_alias_set (y, 0);
	    }

	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
	    {
	      set_mem_expr (x, 0);
	      set_mem_expr (y, 0);
	      set_mem_offset (x, 0);
	      set_mem_offset (y, 0);
	    }
	  else if (MEM_OFFSET (x) != MEM_OFFSET (y))
	    {
	      set_mem_offset (x, 0);
	      set_mem_offset (y, 0);
	    }

	  if (!MEM_SIZE (x))
	    mem_size = NULL_RTX;
	  else if (!MEM_SIZE (y))
	    mem_size = NULL_RTX;
	  else
	    mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
				     INTVAL (MEM_SIZE (y))));
	  set_mem_size (x, mem_size);
	  set_mem_size (y, mem_size);

	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
	  set_mem_align (y, MEM_ALIGN (x));
	}
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return;

	  for (j = 0; j < XVECLEN (x, i); j++)
	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

	  break;

	case 'e':
	  merge_memattrs (XEXP (x, i), XEXP (y, i));
	}
    }
  return;
}


/* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */

static bool
old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return false;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return false;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.  */

  if (CALL_P (i1)
      && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
			CALL_INSN_FUNCTION_USAGE (i2))
	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
    return false;

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
	 death notes must also be compared before it is certain that
	 the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);

      return false;

    done:
      ;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return true;

  /* Do not do EQUIV substitution after reload.  First, we're undoing the
     work of reload_cse.  Second, we may be undoing the work of the post-
     reload splitting pass.  */
  /* ??? Possibly add a new phase switch variable that can be used by
     targets to disallow the troublesome insns after splitting.  */
  if (!reload_completed)
    {
      /* The following code helps take care of G++ cleanups.  */
      rtx equiv1 = find_reg_equal_equiv_note (i1);
      rtx equiv2 = find_reg_equal_equiv_note (i2);

      if (equiv1 && equiv2
	  /* If the equivalences are not to a constant, they may
	     reference pseudos that no longer exist, so we can't
	     use them.  */
	  && (! reload_completed
	      || (CONSTANT_P (XEXP (equiv1, 0))
		  && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
	{
	  rtx s1 = single_set (i1);
	  rtx s2 = single_set (i2);
	  if (s1 != 0 && s2 != 0
	      && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
	    {
	      validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
	      validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
	      if (! rtx_renumbered_equal_p (p1, p2))
		cancel_changes (0);
	      else if (apply_change_group ())
		return true;
	    }
	}
    }

  return false;
}

/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of insns that are equivalent.  Store the first insns for that
   sequence in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */
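
/* For instance (an illustrative sketch): if BB1 and BB2 both end in the
   same two insns followed by an unconditional jump, the backward walk below
   matches those two insns, *F1 and *F2 point at the first of them in each
   block, and 2 is returned; the caller can then redirect control so that
   only one copy of the tail survives.  */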

static int
flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
		      basic_block bb2, rtx *f1, rtx *f2)
{
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except for unconditional jump as insn.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
	ninsns++;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* Ignore notes.  */
      while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
	i1 = PREV_INSN (i1);

      while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
	i2 = PREV_INSN (i2);

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
	break;

      if (!old_insns_match_p (mode, i1, i2))
	break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
	{
	  /* If the merged insns have different REG_EQUAL notes, then
	     remove them.  */
	  rtx equiv1 = find_reg_equal_equiv_note (i1);
	  rtx equiv2 = find_reg_equal_equiv_note (i2);

	  if (equiv1 && !equiv2)
	    remove_note (i1, equiv1);
	  else if (!equiv1 && equiv2)
	    remove_note (i2, equiv2);
	  else if (equiv1 && equiv2
		   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
	    {
	      remove_note (i1, equiv1);
	      remove_note (i2, equiv2);
	    }

	  afterlast1 = last1, afterlast2 = last2;
	  last1 = i1, last2 = i2;
	  ninsns++;
	}

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, ninsns--;
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}

/* Return true iff the condbranches at the end of BB1 and BB2 match.  */
bool
condjump_equiv_p (struct equiv_info *info, bool call_init)
{
  basic_block bb1 = info->x_block;
  basic_block bb2 = info->y_block;
  edge b1 = BRANCH_EDGE (bb1);
  edge b2 = BRANCH_EDGE (bb2);
  edge f1 = FALLTHRU_EDGE (bb1);
  edge f2 = FALLTHRU_EDGE (bb2);
  bool reverse, match;
  rtx set1, set2, cond1, cond2;
  rtx src1, src2;
  enum rtx_code code1, code2;

  /* Get around possible forwarders on fallthru edges.  Other cases
     should be optimized out already.  */
  if (FORWARDER_BLOCK_P (f1->dest))
    f1 = single_succ_edge (f1->dest);

  if (FORWARDER_BLOCK_P (f2->dest))
    f2 = single_succ_edge (f2->dest);

  /* To simplify use of this function, return false if there are
     unneeded forwarder blocks.  These will get eliminated later
     during cleanup_cfg.  */
  if (FORWARDER_BLOCK_P (f1->dest)
      || FORWARDER_BLOCK_P (f2->dest)
      || FORWARDER_BLOCK_P (b1->dest)
      || FORWARDER_BLOCK_P (b2->dest))
    return false;

  if (f1->dest == f2->dest && b1->dest == b2->dest)
    reverse = false;
  else if (f1->dest == b2->dest && b1->dest == f2->dest)
    reverse = true;
  else
    return false;

  set1 = pc_set (BB_END (bb1));
  set2 = pc_set (BB_END (bb2));
  if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    reverse = !reverse;

  src1 = SET_SRC (set1);
  src2 = SET_SRC (set2);
  cond1 = XEXP (src1, 0);
  cond2 = XEXP (src2, 0);
  code1 = GET_CODE (cond1);
  if (reverse)
    code2 = reversed_comparison_code (cond2, BB_END (bb2));
  else
    code2 = GET_CODE (cond2);

  if (code2 == UNKNOWN)
    return false;

  if (call_init && !struct_equiv_init (STRUCT_EQUIV_START | info->mode, info))
    gcc_unreachable ();
  /* Make the sources of the pc sets unreadable so that when we call
     insns_match_p it won't process them.
     The death_notes_match_p from insns_match_p won't see the local registers
     used for the pc set, but that could only cause missed optimizations when
     there are actually condjumps that use stack registers.  */
  SET_SRC (set1) = pc_rtx;
  SET_SRC (set2) = pc_rtx;
  /* Verify codes and operands match.  */
  if (code1 == code2)
    {
      match = (insns_match_p (BB_END (bb1), BB_END (bb2), info)
	       && rtx_equiv_p (&XEXP (cond1, 0), XEXP (cond2, 0), 1, info)
	       && rtx_equiv_p (&XEXP (cond1, 1), XEXP (cond2, 1), 1, info));
    }
  else if (code1 == swap_condition (code2))
    {
      match = (insns_match_p (BB_END (bb1), BB_END (bb2), info)
	       && rtx_equiv_p (&XEXP (cond1, 1), XEXP (cond2, 0), 1, info)
	       && rtx_equiv_p (&XEXP (cond1, 0), XEXP (cond2, 1), 1, info));
    }
  else
    match = false;
  SET_SRC (set1) = src1;
  SET_SRC (set2) = src2;
  match &= verify_changes (0);

  /* If we return true, we will join the blocks.  Which means that
     we will only have one branch prediction bit to work with.  Thus
     we require the existing branches to have probabilities that are
     roughly similar.  */
  if (match
      && !optimize_size
      && maybe_hot_bb_p (bb1)
      && maybe_hot_bb_p (bb2))
    {
      int prob2;

      if (b1->dest == b2->dest)
	prob2 = b2->probability;
      else
	/* Do not use f2 probability as f2 may be forwarded.  */
	prob2 = REG_BR_PROB_BASE - b2->probability;

      /* Fail if the difference in probabilities is greater than 50%.
	 This rules out two well-predicted branches with opposite
	 outcomes.  */
      if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
		     bb1->index, bb2->index, b1->probability, prob2);

	  match = false;
	}
    }

  if (dump_file && match)
    fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
	     bb1->index, bb2->index);

  if (!match)
    cancel_changes (0);
  return match;
}

/* Return true iff outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */
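
/* For example (an illustrative sketch): BB1 ending in "if (a < b) goto X;"
   with fallthru to Y matches BB2 ending in "if (a >= b) goto Y;" with
   fallthru to X -- the conditions are reversed and the destinations
   swapped, which the code below handles via the REVERSE flag.  */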

static bool
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;
  edge_iterator ei;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (single_succ_p (bb1)
      && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
    return (single_succ_p (bb2)
	    && (single_succ_edge (bb2)->flags
		& (EDGE_COMPLEX | EDGE_FAKE)) == 0
	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (EDGE_COUNT (bb1->succs) == 2
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (EDGE_COUNT (bb2->succs) != 2
	  || !any_condjump_p (BB_END (bb2))
	  || !onlyjump_p (BB_END (bb2)))
	return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
	 should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
	f1 = single_succ_edge (f1->dest);

      if (FORWARDER_BLOCK_P (f2->dest))
	f2 = single_succ_edge (f2->dest);

      /* To simplify use of this function, return false if there are
	 unneeded forwarder blocks.  These will get eliminated later
	 during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
	  || FORWARDER_BLOCK_P (f2->dest)
	  || FORWARDER_BLOCK_P (b1->dest)
	  || FORWARDER_BLOCK_P (b2->dest))
	return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
	reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
	reverse = true;
      else
	return false;

      set1 = pc_set (BB_END (bb1));
      set2 = pc_set (BB_END (bb2));
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
	reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
	code2 = reversed_comparison_code (cond2, BB_END (bb2));
      else
	code2 = GET_CODE (cond2);

      if (code2 == UNKNOWN)
	return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
	       || (code1 == swap_condition (code2)
		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
					      XEXP (cond2, 0))
		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
					      XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
	 we will only have one branch prediction bit to work with.  Thus
	 we require the existing branches to have probabilities that are
	 roughly similar.  */
      if (match
	  && !optimize_size
	  && maybe_hot_bb_p (bb1)
	  && maybe_hot_bb_p (bb2))
	{
	  int prob2;

	  if (b1->dest == b2->dest)
	    prob2 = b2->probability;
	  else
	    /* Do not use f2 probability as f2 may be forwarded.  */
	    prob2 = REG_BR_PROB_BASE - b2->probability;

	  /* Fail if the difference in probabilities is greater than 50%.
	     This rules out two well-predicted branches with opposite
	     outcomes.  */
	  if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
			 bb1->index, bb2->index, b1->probability, prob2);

	      return false;
	    }
	}

      if (dump_file && match)
	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
		 bb1->index, bb2->index);

      return match;
    }

  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

  /* Check whether there are tablejumps at the end of BB1 and BB2.
     Return true if they are identical.  */
    {
      rtx label1, label2;
      rtx table1, table2;

      if (tablejump_p (BB_END (bb1), &label1, &table1)
	  && tablejump_p (BB_END (bb2), &label2, &table2)
	  && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
	{
	  /* The labels should never be the same rtx.  If they really are the
	     same, the jump tables are the same too.  So disable crossjumping
	     of blocks BB1 and BB2 because when deleting the common insns at
	     the end of BB1 by delete_basic_block () the jump table would be
	     deleted too.  */
	  /* If LABEL2 is referenced in BB1->END do not do anything
	     because we would lose information when replacing
	     LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
	  if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
	    {
	      /* Set IDENTICAL to true when the tables are identical.  */
	      bool identical = false;
	      rtx p1, p2;

	      p1 = PATTERN (table1);
	      p2 = PATTERN (table2);
	      if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
		{
		  identical = true;
		}
	      else if (GET_CODE (p1) == ADDR_DIFF_VEC
		       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
		       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
		       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
		{
		  int i;

		  identical = true;
		  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
		    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
		      identical = false;
		}

	      if (identical)
		{
		  replace_label_data rr;
		  bool match;

		  /* Temporarily replace references to LABEL1 with LABEL2
		     in BB1->END so that we could compare the instructions.  */
		  rr.r1 = label1;
		  rr.r2 = label2;
		  rr.update_label_nuses = false;
		  for_each_rtx (&BB_END (bb1), replace_label, &rr);

		  match = old_insns_match_p (mode, BB_END (bb1), BB_END (bb2));
		  if (dump_file && match)
		    fprintf (dump_file,
			     "Tablejumps in bb %i and %i match.\n",
			     bb1->index, bb2->index);

		  /* Set the original label in BB1->END because when deleting
		     a block whose end is a tablejump, the tablejump referenced
		     from the instruction is deleted too.  */
		  rr.r1 = label2;
		  rr.r2 = label1;
		  for_each_rtx (&BB_END (bb1), replace_label, &rr);

		  return match;
		}
	    }
	  return false;
	}
    }

  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.  */
  if (!old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
    return false;

  /* Search the outgoing edges, ensure that the counts do match, find possible
     fallthru and exception handling edges since these need more
     validation.  */
  if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
    return false;

  FOR_EACH_EDGE (e1, ei, bb1->succs)
    {
      e2 = EDGE_SUCC (bb2, ei.index);

      if (e1->flags & EDGE_EH)
	nehedges1++;

      if (e2->flags & EDGE_EH)
	nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
	fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
	fallthru2 = e2;
    }

  /* If the number of edges of various types does not match, fail.  */
  if (nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* Fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
			? single_succ (fallthru1->dest): fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
			? single_succ (fallthru2->dest): fallthru2->dest);

      if (d1 != d2)
	return false;
    }

  /* Ensure the same EH region.  */
  {
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);

    if (!n1 && n2)
      return false;

    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
      return false;
  }

  /* The same checks as in try_crossjump_to_edge.  It is required for the
     RTL version of sequence abstraction.  */
  FOR_EACH_EDGE (e1, ei, bb2->succs)
    {
      edge e2;
      edge_iterator ei;
      basic_block d1 = e1->dest;

      if (FORWARDER_BLOCK_P (d1))
	d1 = EDGE_SUCC (d1, 0)->dest;

      FOR_EACH_EDGE (e2, ei, bb1->succs)
	{
	  basic_block d2 = e2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = EDGE_SUCC (d2, 0)->dest;
	  if (d1 == d2)
	    break;
	}

      if (!e2)
	return false;
    }

  return true;
}

/* Return true if basic block BB has a preserve label.  */

static bool
block_has_preserve_label (basic_block bb)
{
  return (bb
	  && block_label (bb)
	  && LABEL_PRESERVE_P (block_label (bb)));
}

/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */
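
/* For instance (an illustrative sketch): if E1->SRC and E2->SRC both end in
   the same computation followed by a jump to the common destination, the
   matched tail of E1->SRC is deleted and its predecessors are redirected to
   the corresponding point in E2->SRC (splitting E2->SRC first if the match
   starts in its middle).  */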
1598 
1599 static bool
try_crossjump_to_edge(int mode,edge e1,edge e2)1600 try_crossjump_to_edge (int mode, edge e1, edge e2)
1601 {
1602   int nmatch;
1603   basic_block src1 = e1->src, src2 = e2->src;
1604   basic_block redirect_to, redirect_from, to_remove;
1605   rtx newpos1, newpos2;
1606   edge s;
1607   edge_iterator ei;
1608 
1609   newpos1 = newpos2 = NULL_RTX;
1610 
1611   /* If we have partitioned hot/cold basic blocks, it is a bad idea
1612      to try this optimization.
1613 
1614      Basic block partitioning may result in some jumps that appear to
1615      be optimizable (or blocks that appear to be mergeable), but which really
1616      must be left untouched (they are required to make it safely across
1617      partition boundaries).  See the comments at the top of
1618      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
1619 
1620   if (flag_reorder_blocks_and_partition && no_new_pseudos)
1621     return false;
1622 
1623   /* Search backward through forwarder blocks.  We don't need to worry
1624      about multiple entry or chained forwarders, as they will be optimized
1625      away.  We do this to look past the unconditional jump following a
1626      conditional jump that is required due to the current CFG shape.  */
  if (single_pred_p (src1)
      && FORWARDER_BLOCK_P (src1))
    e1 = single_pred_edge (src1), src1 = e1->src;

  if (single_pred_p (src2)
      && FORWARDER_BLOCK_P (src2))
    e2 = single_pred_edge (src2), src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than one forwarder block would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (single_succ (e1->dest)))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (single_succ (e2->dest)))
    return false;

  /* Likewise with dead code (possibly newly created by the other
     optimizations of cleanup_cfg).  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);

  /* Don't proceed with the crossjump unless we found a sufficient number
     of matching instructions or the 'from' block was totally matched
     (such that its predecessors will hopefully be redirected and the
     block removed).  */
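
  /* E.g. with --param min-crossjump-insns=5 (the default in this
     version), predecessors sharing only three trailing insns are
     rejected here unless the match covered all of SRC1, i.e.
     newpos1 == BB_HEAD (src1), so SRC1 can likely go away entirely.  */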
  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
      && (newpos1 != BB_HEAD (src1)))
    return false;

  /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
  if (block_has_preserve_label (e1->dest)
      && (e1->flags & EDGE_ABNORMAL))
    return false;

  /* Here we know that the insns at the end of SRC1 which are common
     with SRC2 will be deleted.  If there are tablejumps at the end of
     SRC1 and SRC2, they have already been compared for equivalence in
     outgoing_edges_match (), so replace the references to TABLE1 with
     references to TABLE2.  */
    {
      rtx label1, label2;
      rtx table1, table2;

      if (tablejump_p (BB_END (src1), &label1, &table1)
          && tablejump_p (BB_END (src2), &label2, &table2)
          && label1 != label2)
        {
          replace_label_data rr;
          rtx insn;

          /* Replace references to LABEL1 with LABEL2.  */
          rr.r1 = label1;
          rr.r2 = label2;
          rr.update_label_nuses = true;
          for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
            {
              /* Do not replace the label in SRC1->END because when deleting
                 a block whose end is a tablejump, the tablejump referenced
                 from the instruction is deleted too.  */
              if (insn != BB_END (src1))
                for_each_rtx (&insn, replace_label, &rr);
            }
        }
    }

  /* Avoid splitting if possible.  We must always split when SRC2 has
     EH predecessor edges, or we may end up with basic blocks with both
     normal and EH predecessor edges.  */
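
  /* Sketch of the two cases: when all of SRC2 matched (and SRC2 has no
     EH predecessors) SRC2 itself is reused as REDIRECT_TO; otherwise
     SRC2 is split right before NEWPOS2 and the new block holding the
     matched tail becomes REDIRECT_TO.  */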
  if (newpos2 == BB_HEAD (src2)
      && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
    redirect_to = src2;
  else
    {
      if (newpos2 == BB_HEAD (src2))
        {
          /* Skip possible basic block header.  */
          if (LABEL_P (newpos2))
            newpos2 = NEXT_INSN (newpos2);
          if (NOTE_P (newpos2))
            newpos2 = NEXT_INSN (newpos2);
        }

      if (dump_file)
        fprintf (dump_file, "Splitting bb %i before %i insns\n",
                 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (dump_file)
    fprintf (dump_file,
             "Cross jumping from bb %i to bb %i; %i common insns\n",
             src1->index, src2->index, nmatch);

  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;
  /* We may have some registers visible through the block.  */
  redirect_to->flags |= BB_DIRTY;

  /* Recompute the frequencies and counts of outgoing edges.  */
  FOR_EACH_EDGE (s, ei, redirect_to->succs)
    {
      edge s2;
      edge_iterator ei;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
        d = single_succ (d);

      FOR_EACH_EDGE (s2, ei, src1->succs)
        {
          basic_block d2 = s2->dest;
          if (FORWARDER_BLOCK_P (d2))
            d2 = single_succ (d2);
          if (d == d2)
            break;
        }

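      /* outgoing_edges_match () has already verified that each
         successor of REDIRECT_TO has a counterpart among SRC1's
         successors, so the loop above always finds a matching S2
         before falling off the end.  */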
      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
         that there is no more than one in the chain, so we can't run
         into an infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
        {
          single_succ_edge (s->dest)->count += s2->count;
          s->dest->count += s2->count;
          s->dest->frequency += EDGE_FREQUENCY (s);
        }

      if (FORWARDER_BLOCK_P (s2->dest))
        {
          single_succ_edge (s2->dest)->count -= s2->count;
          if (single_succ_edge (s2->dest)->count < 0)
            single_succ_edge (s2->dest)->count = 0;
          s2->dest->count -= s2->count;
          s2->dest->frequency -= EDGE_FREQUENCY (s);
          if (s2->dest->frequency < 0)
            s2->dest->frequency = 0;
          if (s2->dest->count < 0)
            s2->dest->count = 0;
        }

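      /* The new probability is a frequency-weighted combination of the
         two edge probabilities; e.g. (made-up numbers) frequencies 300
         and 100 with probabilities 40% and 80% give
         (40*300 + 80*100) / 400 = 50%.  */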
      if (!redirect_to->frequency && !src1->frequency)
        s->probability = (s->probability + s2->probability) / 2;
      else
        s->probability
          = ((s->probability * redirect_to->frequency +
              s2->probability * src1->frequency)
             / (redirect_to->frequency + src1->frequency));
    }

  update_br_prob_note (redirect_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (LABEL_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  if (NOTE_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = single_succ (redirect_from);

  redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);
  if (redirect_to != src2)
    update_forwarder_flag (src2);

  return true;
}

/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */

static bool
try_crossjump_bb (int mode, basic_block bb)
{
  edge e, e2, fallthru;
  bool changed;
  unsigned max, ix, ix2;
  basic_block ev, ev2;
  edge_iterator ei;

  /* Nothing to do if there are not at least two incoming edges.  */
  if (EDGE_COUNT (bb->preds) < 2)
    return false;

  /* Don't crossjump if this block ends in a computed jump,
     unless we are optimizing for size.  */
  if (!optimize_size
      && bb != EXIT_BLOCK_PTR
      && computed_jump_p (BB_END (bb)))
    return false;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (EDGE_PRED (bb, 0)->src)
      != BB_PARTITION (EDGE_PRED (bb, 1)->src)
      || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  fallthru = NULL;
  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);

  if (EDGE_COUNT (bb->preds) > max)
    return false;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (e->flags & EDGE_FALLTHRU)
        fallthru = e;
    }

  changed = false;
  for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
    {
      e = EDGE_PRED (ev, ix);
      ix++;

      /* As noted above, first try with the fallthru predecessor.  */
      if (fallthru)
        {
          /* Don't combine the fallthru edge into anything else.
             If there is a match, we'll do it the other way around.  */
          if (e == fallthru)
            continue;
          /* If nothing changed since the last attempt, there is nothing
             we can do.  */
          if (!first_pass
              && (!(e->src->flags & BB_DIRTY)
                  && !(fallthru->src->flags & BB_DIRTY)))
            continue;

          if (try_crossjump_to_edge (mode, e, fallthru))
            {
              changed = true;
              ix = 0;
              ev = bb;
              continue;
            }
        }

      /* Non-obvious work limiting check: Recognize that we're going
         to call try_crossjump_bb on every basic block.  So if we have
         two blocks with lots of outgoing edges (a switch) and they
         share lots of common destinations, then we would do the
         cross-jump check once for each common destination.

         Now, if the blocks actually are cross-jump candidates, then
         all of their destinations will be shared.  Which means that
         we only need to check them for cross-jump candidacy once.  We
         can eliminate redundant checks of crossjump(A,B) by arbitrarily
         choosing to do the check only when the edge in question is the
         first successor of its source block.  */
      if (EDGE_SUCC (e->src, 0) != e)
        continue;

      for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
        {
          e2 = EDGE_PRED (ev2, ix2);
          ix2++;

          if (e2 == e)
            continue;

          /* We've already checked the fallthru edge above.  */
          if (e2 == fallthru)
            continue;

          /* The "first successor" check above only prevents multiple
             checks of crossjump(A,B).  In order to prevent redundant
             checks of crossjump(B,A), require that A be the block
             with the lowest index.  */
          if (e->src->index > e2->src->index)
            continue;

          /* If nothing changed since the last attempt, there is nothing
             we can do.  */
          if (!first_pass
              && (!(e->src->flags & BB_DIRTY)
                  && !(e2->src->flags & BB_DIRTY)))
            continue;

          if (try_crossjump_to_edge (mode, e, e2))
            {
              changed = true;
              ev2 = bb;
              ix = 0;
              break;
            }
        }
    }

  return changed;
}

/* Do simple CFG optimizations - basic block merging, simplifying of jump
   instructions etc.  Return nonzero if changes were made.  */

static bool
try_optimize_cfg (int mode)
{
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  basic_block bb, b, next;

  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
    clear_bb_flags ();

  FOR_EACH_BB (bb)
    update_forwarder_flag (bb);

  if (! targetm.cannot_modify_jumps_p ())
    {
      first_pass = true;
      /* Attempt to merge blocks as made possible by edge removal.  If
         a block has only one successor, and the successor has only
         one predecessor, they may be combined.  */
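      /* E.g. deleting one dead block can leave its former successor
         with no predecessors, making it trivially dead in the next
         iteration, so we loop until a full pass changes nothing.  */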
      do
        {
          changed = false;
          iterations++;

          if (dump_file)
            fprintf (dump_file,
                     "\n\ntry_optimize_cfg iteration %i\n\n",
                     iterations);

          for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
            {
              basic_block c;
              edge s;
              bool changed_here = false;

              /* Delete trivially dead basic blocks.  */
              while (EDGE_COUNT (b->preds) == 0)
                {
                  c = b->prev_bb;
                  if (dump_file)
                    fprintf (dump_file, "Deleting block %i.\n",
                             b->index);

                  delete_basic_block (b);
                  if (!(mode & CLEANUP_CFGLAYOUT))
                    changed = true;
                  b = c;
                }

              /* Remove code labels no longer used.  */
              if (single_pred_p (b)
                  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
                  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
                  && LABEL_P (BB_HEAD (b))
                  /* If the previous block ends with a branch to this
                     block, we can't delete the label.  Normally this
                     is a condjump that is yet to be simplified, but
                     if CASE_DROPS_THRU, this can be a tablejump with
                     some element going to the same place as the
                     default (fallthru).  */
                  && (single_pred (b) == ENTRY_BLOCK_PTR
                      || !JUMP_P (BB_END (single_pred (b)))
                      || ! label_is_jump_target_p (BB_HEAD (b),
                                                   BB_END (single_pred (b)))))
                {
                  rtx label = BB_HEAD (b);

                  delete_insn_chain (label, label);
                  /* If the label is undeletable, move it after the
                     BASIC_BLOCK note.  */
                  if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
                    {
                      rtx bb_note = NEXT_INSN (BB_HEAD (b));

                      reorder_insns_nobb (label, label, bb_note);
                      BB_HEAD (b) = bb_note;
                    }
                  if (dump_file)
                    fprintf (dump_file, "Deleted label in block %i.\n",
                             b->index);
                }

              /* If we fall through an empty block, we can remove it.  */
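              /* Sketch: A --fallthru--> B --fallthru--> C, where B is
                 an empty forwarder, becomes A --fallthru--> C and B is
                 deleted.  */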
              if (!(mode & CLEANUP_CFGLAYOUT)
                  && single_pred_p (b)
                  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
                  && !LABEL_P (BB_HEAD (b))
                  && FORWARDER_BLOCK_P (b)
                  /* Note that forwarder_block_p true ensures that
                     there is a successor for this block.  */
                  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
                  && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
                {
                  if (dump_file)
                    fprintf (dump_file,
                             "Deleting fallthru block %i.\n",
                             b->index);

                  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
                  redirect_edge_succ_nodup (single_pred_edge (b),
                                            single_succ (b));
                  delete_basic_block (b);
                  changed = true;
                  b = c;
                }

              if (single_succ_p (b)
                  && (s = single_succ_edge (b))
                  && !(s->flags & EDGE_COMPLEX)
                  && (c = s->dest) != EXIT_BLOCK_PTR
                  && single_pred_p (c)
                  && b != c)
                {
                  /* When not in cfg_layout mode, use code that is aware
                     of insn reordering.  This code possibly creates new
                     basic blocks, so it does not fit the merge_blocks
                     interface and is kept here in the hope that it will
                     become useless once more of the compiler is
                     transformed to use cfg_layout mode.  */

                  if ((mode & CLEANUP_CFGLAYOUT)
                      && can_merge_blocks_p (b, c))
                    {
                      merge_blocks (b, c);
                      update_forwarder_flag (b);
                      changed_here = true;
                    }
                  else if (!(mode & CLEANUP_CFGLAYOUT)
                           /* If the jump insn has side effects,
                              we can't kill the edge.  */
                           && (!JUMP_P (BB_END (b))
                               || (reload_completed
                                   ? simplejump_p (BB_END (b))
                                   : (onlyjump_p (BB_END (b))
                                      && !tablejump_p (BB_END (b),
                                                       NULL, NULL))))
                           && (next = merge_blocks_move (s, b, c, mode)))
                    {
                      b = next;
                      changed_here = true;
                    }
                }

              /* Simplify branch over branch.  */
              if ((mode & CLEANUP_EXPENSIVE)
                  && !(mode & CLEANUP_CFGLAYOUT)
                  && try_simplify_condjump (b))
                changed_here = true;

              /* If B has a single outgoing edge, but uses a
                 non-trivial jump instruction without side-effects, we
                 can either delete the jump entirely, or replace it
                 with a simple unconditional jump.  */
              if (single_succ_p (b)
                  && single_succ (b) != EXIT_BLOCK_PTR
                  && onlyjump_p (BB_END (b))
                  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
                  && try_redirect_by_replacing_jump (single_succ_edge (b),
                                                     single_succ (b),
                                                     (mode & CLEANUP_CFGLAYOUT) != 0))
                {
                  update_forwarder_flag (b);
                  changed_here = true;
                }

              /* Simplify branch to branch.  */
              if (try_forward_edges (mode, b))
                changed_here = true;

              /* Look for shared code between blocks.  */
              if ((mode & CLEANUP_CROSSJUMP)
                  && try_crossjump_bb (mode, b))
                changed_here = true;

              /* Don't get confused by the index shift caused by
                 deleting blocks.  */
              if (!changed_here)
                b = b->next_bb;
              else
                changed = true;
            }

          if ((mode & CLEANUP_CROSSJUMP)
              && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
            changed = true;

#ifdef ENABLE_CHECKING
          if (changed)
            verify_flow_info ();
#endif

          changed_overall |= changed;
          first_pass = false;
        }
      while (changed);
    }

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  FOR_ALL_BB (b)
    b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);

  return changed_overall;
}

/* Delete all unreachable basic blocks.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, next_bb;

  find_unreachable_blocks ();

  /* Delete all unreachable basic blocks.  */

  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
    {
      next_bb = b->next_bb;

      if (!(b->flags & BB_REACHABLE))
        {
          delete_basic_block (b);
          changed = true;
        }
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}

/* Merges sequential blocks if possible.  */
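
/* E.g. (hypothetical): for a chain A -> B -> C where each edge is its
   source's only successor edge and its destination's only predecessor
   edge, the loop below merges B into A, retries, and then merges C
   into the combined block, leaving a single block.  */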

bool
merge_seq_blocks (void)
{
  basic_block bb;
  bool changed = false;

  for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
    {
      if (single_succ_p (bb)
          && can_merge_blocks_p (bb, single_succ (bb)))
        {
          /* Merge the blocks and retry.  */
          merge_blocks (bb, single_succ (bb));
          changed = true;
          continue;
        }

      bb = bb->next_bb;
    }

  return changed;
}

/* Tidy the CFG by deleting unreachable code and whatnot.  */
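
/* Callers combine the CLEANUP_* mode flags; e.g. rest_of_handle_jump2
   below does

     cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
                  | (flag_thread_jumps ? CLEANUP_THREADING : 0));

   so the expensive simplifications and jump threading run only when
   requested.  */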

bool
cleanup_cfg (int mode)
{
  bool changed = false;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
         now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE))
          && !reload_completed)
        delete_trivially_dead_insns (get_insns (), max_reg_num ());
    }

  compact_blocks ();

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks (), changed = true;
      if (mode & CLEANUP_UPDATE_LIFE)
        {
          /* Cleaning up the CFG introduces more opportunities for dead
             code removal that in turn may introduce more opportunities
             for cleaning up the CFG.  */
          if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
                                                 PROP_DEATH_NOTES
                                                 | PROP_SCAN_DEAD_CODE
                                                 | PROP_KILL_DEAD_CODE
                                                 | ((mode & CLEANUP_LOG_LINKS)
                                                    ? PROP_LOG_LINKS : 0)))
            break;
        }
      else if (!(mode & CLEANUP_NO_INSN_DEL)
               && (mode & CLEANUP_EXPENSIVE)
               && !reload_completed)
        {
          if (!delete_trivially_dead_insns (get_insns (), max_reg_num ()))
            break;
        }
      else
        break;
      delete_dead_jumptables ();
    }

  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}

static unsigned int
rest_of_handle_jump (void)
{
  delete_unreachable_blocks ();

  if (cfun->tail_call_emit)
    fixup_tail_calls ();
  return 0;
}

struct tree_opt_pass pass_jump =
{
  "sibling",                            /* name */
  NULL,                                 /* gate */
  rest_of_handle_jump,                  /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_JUMP,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_ggc_collect,                     /* todo_flags_start */
  TODO_dump_func |
  TODO_verify_flow,                     /* todo_flags_finish */
  'i'                                   /* letter */
};


static unsigned int
rest_of_handle_jump2 (void)
{
  /* Turn NOTE_INSN_EXPECTED_VALUE into REG_BR_PROB.  Do this
     before jump optimization switches branch directions.  */
  if (flag_guess_branch_prob)
    expected_value_to_br_prob ();

  delete_trivially_dead_insns (get_insns (), max_reg_num ());
  reg_scan (get_insns (), max_reg_num ());
  if (dump_file)
    dump_flow_info (dump_file, dump_flags);
  cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
               | (flag_thread_jumps ? CLEANUP_THREADING : 0));

  purge_line_number_notes ();

  if (optimize)
    cleanup_cfg (CLEANUP_EXPENSIVE);

  /* Jump optimization, and the removal of NULL pointer checks, may
     have reduced the number of instructions substantially.  CSE, and
     future passes, allocate arrays whose dimensions involve the
     maximum instruction UID, so if we can reduce the maximum UID
     we'll save big on memory.  */
  renumber_insns ();
  return 0;
}


struct tree_opt_pass pass_jump2 =
{
  "jump",                               /* name */
  NULL,                                 /* gate */
  rest_of_handle_jump2,                 /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_JUMP,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_ggc_collect,                     /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
  'j'                                   /* letter */
};