xref: /dragonfly/contrib/gcc-4.7/gcc/bb-reorder.c (revision 82730a9c)
1 /* Basic block reordering routines for the GNU compiler.
2    Copyright (C) 2000, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011,
3    2012 Free Software Foundation, Inc.
4 
5    This file is part of GCC.
6 
7    GCC is free software; you can redistribute it and/or modify it
8    under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3, or (at your option)
10    any later version.
11 
12    GCC is distributed in the hope that it will be useful, but WITHOUT
13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15    License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with GCC; see the file COPYING3.  If not see
19    <http://www.gnu.org/licenses/>.  */
20 
21 /* This (greedy) algorithm constructs traces in several rounds.
22    The construction starts from "seeds".  The seed for the first round
23    is the entry point of the function.  When there is more than one seed,
24    the one with the lowest key in the heap is selected first
25    (see function bb_to_key).  Then the algorithm repeatedly adds the most
26    probable successor to the end of a trace.  Finally it connects the traces.
27 
28    There are two parameters: Branch Threshold and Exec Threshold.
29    If the probability of the edge to a successor of the current basic block
30    is lower than Branch Threshold, or the frequency of the successor is lower
31    than Exec Threshold, the successor becomes a seed in one of the next rounds.
32    Each round has these parameters lower than the previous one.
33    The last round has to have these parameters set to zero
34    so that the remaining blocks are picked up.
35 
36    The algorithm selects the most probable successor from all unvisited
37    successors and successors that have been added to this trace.
38    The other successors (that have not been "sent" to the next round) become
39    other seeds for this round, and the secondary traces start in them.
40    If the successor has not been visited in this trace, it is added to the trace
41    (however, there is some heuristic for simple branches).
42    If the successor has been visited in this trace, a loop has been found.
43    If the loop has many iterations, the loop is rotated so that the
44    source block of the most probable edge going out from the loop
45    is the last block of the trace.
46    If the loop has few iterations and there is no edge from the last block of
47    the loop going out of the loop, the loop header is duplicated.
48    Finally, the construction of the trace is terminated.
49 
50    When connecting traces, the algorithm first checks whether there is an edge from the
51    last block of one trace to the first block of another trace.
52    When there are still some unconnected traces it checks whether there exists
53    a basic block BB such that BB is a successor of the last bb of one trace
54    and BB is a predecessor of the first block of another trace. In this case,
55    BB is duplicated and the traces are connected through this duplicate.
56    The remaining traces are simply connected, so there will be a jump to the
57    beginning of each remaining trace.
58 
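   For example, on a diamond CFG A -> B (90%), A -> C (10%), B -> D and
   C -> D, the first round grows the trace A, B, D from the seed A, since
   A -> B is always the most probable successor edge.  C falls below the
   thresholds of the early rounds, so it is sent forward, becomes the seed
   of a later round and forms the one-block trace C, which is finally
   connected by a jump into the other trace.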
59 
60    References:
61 
62    "Software Trace Cache"
63    A. Ramirez, J. Larriba-Pey, C. Navarro, J. Torrellas and M. Valero; 1999
64    http://citeseer.nj.nec.com/15361.html
65 
66 */
67 
68 #include "config.h"
69 #include "system.h"
70 #include "coretypes.h"
71 #include "tm.h"
72 #include "rtl.h"
73 #include "regs.h"
74 #include "flags.h"
75 #include "timevar.h"
76 #include "output.h"
77 #include "cfglayout.h"
78 #include "fibheap.h"
79 #include "target.h"
80 #include "function.h"
81 #include "tm_p.h"
82 #include "obstack.h"
83 #include "expr.h"
84 #include "params.h"
85 #include "diagnostic-core.h"
86 #include "toplev.h" /* user_defined_section_attribute */
87 #include "tree-pass.h"
88 #include "df.h"
89 #include "bb-reorder.h"
90 #include "except.h"
91 
92 /* The number of rounds.  In most cases there will only be 4 rounds, but
93    when partitioning hot and cold basic blocks into separate sections of
94    the .o file there will be an extra round.  */
95 #define N_ROUNDS 5
96 
97 /* Stubs in case we don't have a return insn.
98    We have to check at run time too, not only at compile time.  */
99 
100 #ifndef HAVE_return
101 #define HAVE_return 0
102 #define gen_return() NULL_RTX
103 #endif
104 
105 
106 struct target_bb_reorder default_target_bb_reorder;
107 #if SWITCHABLE_TARGET
108 struct target_bb_reorder *this_target_bb_reorder = &default_target_bb_reorder;
109 #endif
110 
111 #define uncond_jump_length \
112   (this_target_bb_reorder->x_uncond_jump_length)
113 
114 /* Branch thresholds in thousandths (per mille) of the REG_BR_PROB_BASE.  */
115 static int branch_threshold[N_ROUNDS] = {400, 200, 100, 0, 0};
116 
117 /* Exec thresholds in thousandths (per mille) of the frequency of bb 0.  */
118 static int exec_threshold[N_ROUNDS] = {500, 200, 50, 0, 0};
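/* For example, in the first round an edge must have a probability of at
   least 40% (400 per mille of REG_BR_PROB_BASE) and the successor must have
   at least 50% of the maximal entry frequency; the final rounds accept
   every remaining block.  */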
119 
120 /* If the edge frequency is lower than DUPLICATION_THRESHOLD per mille of the
121    entry block frequency, the edge destination is not duplicated while connecting traces.  */
122 #define DUPLICATION_THRESHOLD 100
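/* That is, with the value 100 an edge must carry at least 10% of the maximal
   entry frequency (and count) before connect_traces lets the copied block
   grow beyond the size of an unconditional jump; tiny blocks are copied
   regardless.  */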
123 
124 /* Structure to hold needed information for each basic block.  */
125 typedef struct bbro_basic_block_data_def
126 {
127   /* Which trace is the bb start of (-1 means it is not a start of a trace).  */
128   int start_of_trace;
129 
130   /* Which trace is the bb end of (-1 means it is not an end of a trace).  */
131   int end_of_trace;
132 
133   /* Which trace is the bb in?  */
134   int in_trace;
135 
136   /* Which heap is BB in (if any)?  */
137   fibheap_t heap;
138 
139   /* Which heap node is BB in (if any)?  */
140   fibnode_t node;
141 } bbro_basic_block_data;
142 
143 /* The current size of the following dynamic array.  */
144 static int array_size;
145 
146 /* The array which holds needed information for basic blocks.  */
147 static bbro_basic_block_data *bbd;
148 
149 /* To avoid frequent reallocation the size of the array is greater than
150    needed; the number of elements is (not less than) 1.25 * size_wanted.  */
151 #define GET_ARRAY_SIZE(X) ((((X) / 4) + 1) * 5)
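/* For instance, GET_ARRAY_SIZE (100) = ((100 / 4) + 1) * 5 = 130, which is
   not less than 1.25 * 100; the "+ 1" keeps the result strictly greater than
   the requested size even when X is divisible by 4.  */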
152 
153 /* Free the memory and set the pointer to NULL.  */
154 #define FREE(P) (gcc_assert (P), free (P), P = 0)
155 
156 /* Structure for holding information about a trace.  */
157 struct trace
158 {
159   /* First and last basic block of the trace.  */
160   basic_block first, last;
161 
162   /* The round of STC creation in which this trace was found.  */
163   int round;
164 
165   /* The length (i.e. the number of basic blocks) of the trace.  */
166   int length;
167 };
168 
169 /* Maximum frequency and count of one of the entry blocks.  */
170 static int max_entry_frequency;
171 static gcov_type max_entry_count;
172 
173 /* Local function prototypes.  */
174 static void find_traces (int *, struct trace *);
175 static basic_block rotate_loop (edge, struct trace *, int);
176 static void mark_bb_visited (basic_block, int);
177 static void find_traces_1_round (int, int, gcov_type, struct trace *, int *,
178 				 int, fibheap_t *, int);
179 static basic_block copy_bb (basic_block, edge, basic_block, int);
180 static fibheapkey_t bb_to_key (basic_block);
181 static bool better_edge_p (const_basic_block, const_edge, int, int, int, int, const_edge);
182 static void connect_traces (int, struct trace *);
183 static bool copy_bb_p (const_basic_block, int);
184 static bool push_to_next_round_p (const_basic_block, int, int, int, gcov_type);
185 
186 /* Check to see if bb should be pushed into the next round of trace
187    collections or not.  Reasons for pushing the block forward are:
188    1) the block is cold, we are doing partitioning, and there will be
189    another round (cold partition blocks are not supposed to be
190    collected into traces until the very last round); or 2) there will
191    be another round, and the basic block is not "hot enough" for the
192    current round of trace collection.  */
193 
194 static bool
195 push_to_next_round_p (const_basic_block bb, int round, int number_of_rounds,
196 		      int exec_th, gcov_type count_th)
197 {
198   bool there_exists_another_round;
199   bool block_not_hot_enough;
200 
201   there_exists_another_round = round < number_of_rounds - 1;
202 
203   block_not_hot_enough = (bb->frequency < exec_th
204 			  || bb->count < count_th
205 			  || probably_never_executed_bb_p (bb));
206 
207   if (there_exists_another_round
208       && block_not_hot_enough)
209     return true;
210   else
211     return false;
212 }
213 
214 /* Find the traces for Software Trace Cache.  Chain each trace through
215    the basic blocks' aux fields.  Store the number of traces in *N_TRACES
216    and the descriptions of the traces in TRACES.  */
217 
218 static void
219 find_traces (int *n_traces, struct trace *traces)
220 {
221   int i;
222   int number_of_rounds;
223   edge e;
224   edge_iterator ei;
225   fibheap_t heap;
226 
227   /* Add one extra round of trace collection when partitioning hot/cold
228      basic blocks into separate sections.  The last round is for all the
229      cold blocks (and ONLY the cold blocks).  */
230 
231   number_of_rounds = N_ROUNDS - 1;
232 
233   /* Insert entry points of function into heap.  */
234   heap = fibheap_new ();
235   max_entry_frequency = 0;
236   max_entry_count = 0;
237   FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
238     {
239       bbd[e->dest->index].heap = heap;
240       bbd[e->dest->index].node = fibheap_insert (heap, bb_to_key (e->dest),
241 						    e->dest);
242       if (e->dest->frequency > max_entry_frequency)
243 	max_entry_frequency = e->dest->frequency;
244       if (e->dest->count > max_entry_count)
245 	max_entry_count = e->dest->count;
246     }
247 
248   /* Find the traces.  */
249   for (i = 0; i < number_of_rounds; i++)
250     {
251       gcov_type count_threshold;
252 
253       if (dump_file)
254 	fprintf (dump_file, "STC - round %d\n", i + 1);
255 
256       if (max_entry_count < INT_MAX / 1000)
257 	count_threshold = max_entry_count * exec_threshold[i] / 1000;
258       else
259 	count_threshold = max_entry_count / 1000 * exec_threshold[i];
260 
261       find_traces_1_round (REG_BR_PROB_BASE * branch_threshold[i] / 1000,
262 			   max_entry_frequency * exec_threshold[i] / 1000,
263 			   count_threshold, traces, n_traces, i, &heap,
264 			   number_of_rounds);
265     }
266   fibheap_delete (heap);
267 
268   if (dump_file)
269     {
270       for (i = 0; i < *n_traces; i++)
271 	{
272 	  basic_block bb;
273 	  fprintf (dump_file, "Trace %d (round %d):  ", i + 1,
274 		   traces[i].round + 1);
275 	  for (bb = traces[i].first; bb != traces[i].last; bb = (basic_block) bb->aux)
276 	    fprintf (dump_file, "%d [%d] ", bb->index, bb->frequency);
277 	  fprintf (dump_file, "%d [%d]\n", bb->index, bb->frequency);
278 	}
279       fflush (dump_file);
280     }
281 }
282 
283 /* Rotate the loop whose back edge is BACK_EDGE, in the tail of trace TRACE
284    with sequential number TRACE_N; return the new last block of the trace.  */
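/* For example, if the trace tail is the chain P -> H -> A -> B with back
   edge B -> H, and the best edge leaving the loop goes out from A, the
   blocks are relinked to P -> B -> H -> A, so that the chosen exit edge
   departs from the last block of the trace.  */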
285 
286 static basic_block
287 rotate_loop (edge back_edge, struct trace *trace, int trace_n)
288 {
289   basic_block bb;
290 
291   /* Information about the best end (end after rotation) of the loop.  */
292   basic_block best_bb = NULL;
293   edge best_edge = NULL;
294   int best_freq = -1;
295   gcov_type best_count = -1;
296   /* The best edge is preferred when its destination is not visited yet
297      or is a start block of some trace.  */
298   bool is_preferred = false;
299 
300   /* Find the most frequent edge that goes out of the current trace.  */
301   bb = back_edge->dest;
302   do
303     {
304       edge e;
305       edge_iterator ei;
306 
307       FOR_EACH_EDGE (e, ei, bb->succs)
308 	if (e->dest != EXIT_BLOCK_PTR
309 	    && e->dest->il.rtl->visited != trace_n
310 	    && (e->flags & EDGE_CAN_FALLTHRU)
311 	    && !(e->flags & EDGE_COMPLEX))
312 	{
313 	  if (is_preferred)
314 	    {
315 	      /* The best edge is preferred.  */
316 	      if (!e->dest->il.rtl->visited
317 		  || bbd[e->dest->index].start_of_trace >= 0)
318 		{
319 		  /* The current edge E is also preferred.  */
320 		  int freq = EDGE_FREQUENCY (e);
321 		  if (freq > best_freq || e->count > best_count)
322 		    {
323 		      best_freq = freq;
324 		      best_count = e->count;
325 		      best_edge = e;
326 		      best_bb = bb;
327 		    }
328 		}
329 	    }
330 	  else
331 	    {
332 	      if (!e->dest->il.rtl->visited
333 		  || bbd[e->dest->index].start_of_trace >= 0)
334 		{
335 		  /* The current edge E is preferred.  */
336 		  is_preferred = true;
337 		  best_freq = EDGE_FREQUENCY (e);
338 		  best_count = e->count;
339 		  best_edge = e;
340 		  best_bb = bb;
341 		}
342 	      else
343 		{
344 		  int freq = EDGE_FREQUENCY (e);
345 		  if (!best_edge || freq > best_freq || e->count > best_count)
346 		    {
347 		      best_freq = freq;
348 		      best_count = e->count;
349 		      best_edge = e;
350 		      best_bb = bb;
351 		    }
352 		}
353 	    }
354 	}
355       bb = (basic_block) bb->aux;
356     }
357   while (bb != back_edge->dest);
358 
359   if (best_bb)
360     {
361       /* Rotate the loop so that the BEST_EDGE goes out from the last block of
362 	 the trace.  */
363       if (back_edge->dest == trace->first)
364 	{
365 	  trace->first = (basic_block) best_bb->aux;
366 	}
367       else
368 	{
369 	  basic_block prev_bb;
370 
371 	  for (prev_bb = trace->first;
372 	       prev_bb->aux != back_edge->dest;
373 	       prev_bb = (basic_block) prev_bb->aux)
374 	    ;
375 	  prev_bb->aux = best_bb->aux;
376 
377 	  /* Try to get rid of uncond jump to cond jump.  */
378 	  if (single_succ_p (prev_bb))
379 	    {
380 	      basic_block header = single_succ (prev_bb);
381 
382 	      /* Duplicate HEADER if it is a small block containing cond jump
383 		 in the end.  */
384 	      /* Duplicate HEADER if it is a small block containing a cond jump
385 		 at the end.  */
386 				     NULL_RTX))
387 		copy_bb (header, single_succ_edge (prev_bb), prev_bb, trace_n);
388 	    }
389 	}
390     }
391   else
392     {
393       /* We have not found a suitable loop tail, so do no rotation.  */
394       best_bb = back_edge->src;
395     }
396   best_bb->aux = NULL;
397   return best_bb;
398 }
399 
400 /* Mark BB as visited in trace number TRACE and remove it from any heap.  */
401 
402 static void
403 mark_bb_visited (basic_block bb, int trace)
404 {
405   bb->il.rtl->visited = trace;
406   if (bbd[bb->index].heap)
407     {
408       fibheap_delete_node (bbd[bb->index].heap, bbd[bb->index].node);
409       bbd[bb->index].heap = NULL;
410       bbd[bb->index].node = NULL;
411     }
412 }
413 
414 /* One round of finding traces.  Find traces for BRANCH_TH and EXEC_TH, i.e.
415    do not include into traces basic blocks whose probability is lower than
416    BRANCH_TH or whose frequency is lower than EXEC_TH (or whose count is lower
417    than COUNT_TH).  Store the new traces into TRACES and modify the number of
418    traces *N_TRACES.  Set the round (which the traces belong to) to ROUND.
419    The function expects the starting basic blocks in *HEAP; at the end it
420    deletes that heap and replaces *HEAP with a new heap of next-round seeds.  */
421 
422 static void
423 find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
424 		     struct trace *traces, int *n_traces, int round,
425 		     fibheap_t *heap, int number_of_rounds)
426 {
427   /* Heap for discarded basic blocks which are possible starting points for
428      the next round.  */
429   fibheap_t new_heap = fibheap_new ();
430 
431   while (!fibheap_empty (*heap))
432     {
433       basic_block bb;
434       struct trace *trace;
435       edge best_edge, e;
436       fibheapkey_t key;
437       edge_iterator ei;
438 
439       bb = (basic_block) fibheap_extract_min (*heap);
440       bbd[bb->index].heap = NULL;
441       bbd[bb->index].node = NULL;
442 
443       if (dump_file)
444 	fprintf (dump_file, "Getting bb %d\n", bb->index);
445 
446       /* If the BB's frequency is too low, send BB to the next round.  When
447 	 partitioning hot/cold blocks into separate sections, make sure all
448 	 the cold blocks (and ONLY the cold blocks) go into the (extra) final
449 	 round.  */
450 
451       if (push_to_next_round_p (bb, round, number_of_rounds, exec_th,
452 				count_th))
453 	{
454 	  int key = bb_to_key (bb);
455 	  bbd[bb->index].heap = new_heap;
456 	  bbd[bb->index].node = fibheap_insert (new_heap, key, bb);
457 
458 	  if (dump_file)
459 	    fprintf (dump_file,
460 		     "  Possible start point of next round: %d (key: %d)\n",
461 		     bb->index, key);
462 	  continue;
463 	}
464 
465       trace = traces + *n_traces;
466       trace->first = bb;
467       trace->round = round;
468       trace->length = 0;
469       bbd[bb->index].in_trace = *n_traces;
470       (*n_traces)++;
471 
472       do
473 	{
474 	  int prob, freq;
475 	  bool ends_in_call;
476 
477 	  /* The probability and frequency of the best edge.  */
478 	  int best_prob = INT_MIN / 2;
479 	  int best_freq = INT_MIN / 2;
480 
481 	  best_edge = NULL;
482 	  mark_bb_visited (bb, *n_traces);
483 	  trace->length++;
484 
485 	  if (dump_file)
486 	    fprintf (dump_file, "Basic block %d was visited in trace %d\n",
487 		     bb->index, *n_traces - 1);
488 
489 	  ends_in_call = block_ends_with_call_p (bb);
490 
491 	  /* Select the successor that will be placed after BB.  */
492 	  FOR_EACH_EDGE (e, ei, bb->succs)
493 	    {
494 	      gcc_assert (!(e->flags & EDGE_FAKE));
495 
496 	      if (e->dest == EXIT_BLOCK_PTR)
497 		continue;
498 
499 	      if (e->dest->il.rtl->visited
500 		  && e->dest->il.rtl->visited != *n_traces)
501 		continue;
502 
503 	      if (BB_PARTITION (e->dest) != BB_PARTITION (bb))
504 		continue;
505 
506 	      prob = e->probability;
507 	      freq = e->dest->frequency;
508 
509 	      /* The only sensible preference for a call instruction is the
510 		 fallthru edge.  Don't bother selecting anything else.  */
511 	      if (ends_in_call)
512 		{
513 		  if (e->flags & EDGE_CAN_FALLTHRU)
514 		    {
515 		      best_edge = e;
516 		      best_prob = prob;
517 		      best_freq = freq;
518 		    }
519 		  continue;
520 		}
521 
522 	      /* Skip an edge that cannot be a fallthru edge, or an improbable
523 		 or infrequent successor (i.e. an unsuitable successor).  */
524 	      if (!(e->flags & EDGE_CAN_FALLTHRU) || (e->flags & EDGE_COMPLEX)
525 		  || prob < branch_th || EDGE_FREQUENCY (e) < exec_th
526 		  || e->count < count_th)
527 		continue;
528 
529 	      /* If partitioning hot/cold basic blocks, don't consider edges
530 		 that cross section boundaries.  */
531 
532 	      if (better_edge_p (bb, e, prob, freq, best_prob, best_freq,
533 				 best_edge))
534 		{
535 		  best_edge = e;
536 		  best_prob = prob;
537 		  best_freq = freq;
538 		}
539 	    }
540 
541 	  /* If the best destination has multiple predecessors, and can be
542 	     duplicated cheaper than a jump, don't allow it to be added
543 	     to a trace.  We'll duplicate it when connecting traces.  */
544 	  if (best_edge && EDGE_COUNT (best_edge->dest->preds) >= 2
545 	      && copy_bb_p (best_edge->dest, 0))
546 	    best_edge = NULL;
547 
548 	  /* Add all non-selected successors to the heaps.  */
549 	  FOR_EACH_EDGE (e, ei, bb->succs)
550 	    {
551 	      if (e == best_edge
552 		  || e->dest == EXIT_BLOCK_PTR
553 		  || e->dest->il.rtl->visited)
554 		continue;
555 
556 	      key = bb_to_key (e->dest);
557 
558 	      if (bbd[e->dest->index].heap)
559 		{
560 		  /* E->DEST is already in some heap.  */
561 		  if (key != bbd[e->dest->index].node->key)
562 		    {
563 		      if (dump_file)
564 			{
565 			  fprintf (dump_file,
566 				   "Changing key for bb %d from %ld to %ld.\n",
567 				   e->dest->index,
568 				   (long) bbd[e->dest->index].node->key,
569 				   key);
570 			}
571 		      fibheap_replace_key (bbd[e->dest->index].heap,
572 					   bbd[e->dest->index].node, key);
573 		    }
574 		}
575 	      else
576 		{
577 		  fibheap_t which_heap = *heap;
578 
579 		  prob = e->probability;
580 		  freq = EDGE_FREQUENCY (e);
581 
582 		  if (!(e->flags & EDGE_CAN_FALLTHRU)
583 		      || (e->flags & EDGE_COMPLEX)
584 		      || prob < branch_th || freq < exec_th
585 		      || e->count < count_th)
586 		    {
587 		      /* When partitioning hot/cold basic blocks, make sure
588 			 the cold blocks (and only the cold blocks) all get
589 			 pushed to the last round of trace collection.  */
590 
591 		      if (push_to_next_round_p (e->dest, round,
592 						number_of_rounds,
593 						exec_th, count_th))
594 			which_heap = new_heap;
595 		    }
596 
597 		  bbd[e->dest->index].heap = which_heap;
598 		  bbd[e->dest->index].node = fibheap_insert (which_heap,
599 								key, e->dest);
600 
601 		  if (dump_file)
602 		    {
603 		      fprintf (dump_file,
604 			       "  Possible start of %s round: %d (key: %ld)\n",
605 			       (which_heap == new_heap) ? "next" : "this",
606 			       e->dest->index, (long) key);
607 		    }
608 
609 		}
610 	    }
611 
612 	  if (best_edge) /* Suitable successor was found.  */
613 	    {
614 	      if (best_edge->dest->il.rtl->visited == *n_traces)
615 		{
616 		  /* We do nothing with one-basic-block loops.  */
617 		  if (best_edge->dest != bb)
618 		    {
619 		      if (EDGE_FREQUENCY (best_edge)
620 			  > 4 * best_edge->dest->frequency / 5)
621 			{
622 			  /* The loop has at least 4 iterations.  If the loop
623 			     header is not the first block of the function
624 			     we can rotate the loop.  */
625 
626 			  if (best_edge->dest != ENTRY_BLOCK_PTR->next_bb)
627 			    {
628 			      if (dump_file)
629 				{
630 				  fprintf (dump_file,
631 					   "Rotating loop %d - %d\n",
632 					   best_edge->dest->index, bb->index);
633 				}
634 			      bb->aux = best_edge->dest;
635 			      bbd[best_edge->dest->index].in_trace =
636 							     (*n_traces) - 1;
637 			      bb = rotate_loop (best_edge, trace, *n_traces);
638 			    }
639 			}
640 		      else
641 			{
642 			  /* The loop has less than 4 iterations.  */
643 
644 			  if (single_succ_p (bb)
645 			      && copy_bb_p (best_edge->dest,
646 			      		    optimize_edge_for_speed_p (best_edge)))
647 			    {
648 			      bb = copy_bb (best_edge->dest, best_edge, bb,
649 					    *n_traces);
650 			      trace->length++;
651 			    }
652 			}
653 		    }
654 
655 		  /* Terminate the trace.  */
656 		  break;
657 		}
658 	      else
659 		{
660 		  /* Check for a situation
661 
662 		    A
663 		   /|
664 		  B |
665 		   \|
666 		    C
667 
668 		  where
669 		  EDGE_FREQUENCY (AB) + EDGE_FREQUENCY (BC)
670 		    >= EDGE_FREQUENCY (AC).
671 		  (i.e. 2 * B->frequency >= EDGE_FREQUENCY (AC) )
672 		  Best ordering is then A B C.
673 
674 		  This situation is created for example by:
675 
676 		  if (A) B;
677 		  C;
678 
679 		  */
680 
681 		  FOR_EACH_EDGE (e, ei, bb->succs)
682 		    if (e != best_edge
683 			&& (e->flags & EDGE_CAN_FALLTHRU)
684 			&& !(e->flags & EDGE_COMPLEX)
685 			&& !e->dest->il.rtl->visited
686 			&& single_pred_p (e->dest)
687 			&& !(e->flags & EDGE_CROSSING)
688 			&& single_succ_p (e->dest)
689 			&& (single_succ_edge (e->dest)->flags
690 			    & EDGE_CAN_FALLTHRU)
691 			&& !(single_succ_edge (e->dest)->flags & EDGE_COMPLEX)
692 			&& single_succ (e->dest) == best_edge->dest
693 			&& 2 * e->dest->frequency >= EDGE_FREQUENCY (best_edge))
694 		      {
695 			best_edge = e;
696 			if (dump_file)
697 			  fprintf (dump_file, "Selecting BB %d\n",
698 				   best_edge->dest->index);
699 			break;
700 		      }
701 
702 		  bb->aux = best_edge->dest;
703 		  bbd[best_edge->dest->index].in_trace = (*n_traces) - 1;
704 		  bb = best_edge->dest;
705 		}
706 	    }
707 	}
708       while (best_edge);
709       trace->last = bb;
710       bbd[trace->first->index].start_of_trace = *n_traces - 1;
711       bbd[trace->last->index].end_of_trace = *n_traces - 1;
712 
713       /* The trace is terminated, so we have to recount the keys in the heap
714 	 (some blocks can have lower keys because now one of their predecessors
715 	 is an end of the trace).  */
716       FOR_EACH_EDGE (e, ei, bb->succs)
717 	{
718 	  if (e->dest == EXIT_BLOCK_PTR
719 	      || e->dest->il.rtl->visited)
720 	    continue;
721 
722 	  if (bbd[e->dest->index].heap)
723 	    {
724 	      key = bb_to_key (e->dest);
725 	      if (key != bbd[e->dest->index].node->key)
726 		{
727 		  if (dump_file)
728 		    {
729 		      fprintf (dump_file,
730 			       "Changing key for bb %d from %ld to %ld.\n",
731 			       e->dest->index,
732 			       (long) bbd[e->dest->index].node->key, key);
733 		    }
734 		  fibheap_replace_key (bbd[e->dest->index].heap,
735 				       bbd[e->dest->index].node,
736 				       key);
737 		}
738 	    }
739 	}
740     }
741 
742   fibheap_delete (*heap);
743 
744   /* "Return" the new heap.  */
745   *heap = new_heap;
746 }
747 
748 /* Create a duplicate of the basic block OLD_BB and redirect edge E to it, add
749    it to the trace after BB, mark the new copy as visited and update the pass's
750    data structures (TRACE is the number of the trace OLD_BB is duplicated into).  */
751 
752 static basic_block
753 copy_bb (basic_block old_bb, edge e, basic_block bb, int trace)
754 {
755   basic_block new_bb;
756 
757   new_bb = duplicate_block (old_bb, e, bb);
758   BB_COPY_PARTITION (new_bb, old_bb);
759 
760   gcc_assert (e->dest == new_bb);
761   gcc_assert (!e->dest->il.rtl->visited);
762 
763   if (dump_file)
764     fprintf (dump_file,
765 	     "Duplicated bb %d (created bb %d)\n",
766 	     old_bb->index, new_bb->index);
767   new_bb->il.rtl->visited = trace;
768   new_bb->aux = bb->aux;
769   bb->aux = new_bb;
770 
771   if (new_bb->index >= array_size || last_basic_block > array_size)
772     {
773       int i;
774       int new_size;
775 
776       new_size = MAX (last_basic_block, new_bb->index + 1);
777       new_size = GET_ARRAY_SIZE (new_size);
778       bbd = XRESIZEVEC (bbro_basic_block_data, bbd, new_size);
779       for (i = array_size; i < new_size; i++)
780 	{
781 	  bbd[i].start_of_trace = -1;
782 	  bbd[i].in_trace = -1;
783 	  bbd[i].end_of_trace = -1;
784 	  bbd[i].heap = NULL;
785 	  bbd[i].node = NULL;
786 	}
787       array_size = new_size;
788 
789       if (dump_file)
790 	{
791 	  fprintf (dump_file,
792 		   "Growing the dynamic array to %d elements.\n",
793 		   array_size);
794 	}
795     }
796 
797   bbd[new_bb->index].in_trace = trace;
798 
799   return new_bb;
800 }
801 
802 /* Compute and return the key (for the heap) of the basic block BB.  */
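/* For instance, with BB_FREQ_MAX being 10000, a block of frequency 400 whose
   hottest qualifying predecessor edge has frequency 300 gets the key
   -(100 * 10000 + 100 * 300 + 400) = -1030400, far below the plain -400 it
   would get otherwise, so it is extracted from the heap much sooner.  */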
803 
804 static fibheapkey_t
805 bb_to_key (basic_block bb)
806 {
807   edge e;
808   edge_iterator ei;
809   int priority = 0;
810 
811   /* Do not start in probably never executed blocks.  */
812 
813   if (BB_PARTITION (bb) == BB_COLD_PARTITION
814       || probably_never_executed_bb_p (bb))
815     return BB_FREQ_MAX;
816 
817   /* Prefer blocks whose predecessor is an end of some trace
818      or whose predecessor edge is EDGE_DFS_BACK.  */
819   FOR_EACH_EDGE (e, ei, bb->preds)
820     {
821       if ((e->src != ENTRY_BLOCK_PTR && bbd[e->src->index].end_of_trace >= 0)
822 	  || (e->flags & EDGE_DFS_BACK))
823 	{
824 	  int edge_freq = EDGE_FREQUENCY (e);
825 
826 	  if (edge_freq > priority)
827 	    priority = edge_freq;
828 	}
829     }
830 
831   if (priority)
832     /* The block with priority should have significantly lower key.  */
833     return -(100 * BB_FREQ_MAX + 100 * priority + bb->frequency);
834   return -bb->frequency;
835 }
836 
837 /* Return true when the edge E from basic block BB is better than the temporary
838    best edge (details are in the function).  The probability of edge E is PROB.
839    The frequency of the successor is FREQ.  The current best probability is
840    BEST_PROB, the best frequency is BEST_FREQ.
841    The edges are considered to be equivalent when PROB does not differ much
842    from BEST_PROB; similarly for frequency.  */
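/* E.g. when BEST_PROB is 7000, the tolerance computed below is 700, so any
   PROB between 6300 and 7700 is treated as almost equivalent and the decision
   falls through to the frequency comparison.  */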
843 
844 static bool
845 better_edge_p (const_basic_block bb, const_edge e, int prob, int freq, int best_prob,
846 	       int best_freq, const_edge cur_best_edge)
847 {
848   bool is_better_edge;
849 
850   /* The BEST_* values do not have to be best, but can be a bit smaller than
851      maximum values.  */
852   int diff_prob = best_prob / 10;
853   int diff_freq = best_freq / 10;
854 
855   if (prob > best_prob + diff_prob)
856     /* The edge has higher probability than the temporary best edge.  */
857     is_better_edge = true;
858   else if (prob < best_prob - diff_prob)
859     /* The edge has lower probability than the temporary best edge.  */
860     is_better_edge = false;
861   else if (freq < best_freq - diff_freq)
862     /* The edge and the temporary best edge have almost equivalent
863        probabilities.  The higher frequency of a successor now means
864        that there is another edge going into that successor.
865        This successor has lower frequency so it is better.  */
866     is_better_edge = true;
867   else if (freq > best_freq + diff_freq)
868     /* This successor has higher frequency so it is worse.  */
869     is_better_edge = false;
870   else if (e->dest->prev_bb == bb)
871     /* The edges have equivalent probabilities and the successors
872        have equivalent frequencies.  Select the previous successor.  */
873     is_better_edge = true;
874   else
875     is_better_edge = false;
876 
877   /* If we are doing hot/cold partitioning, make sure that we always favor
878      non-crossing edges over crossing edges.  */
879 
880   if (!is_better_edge
881       && flag_reorder_blocks_and_partition
882       && cur_best_edge
883       && (cur_best_edge->flags & EDGE_CROSSING)
884       && !(e->flags & EDGE_CROSSING))
885     is_better_edge = true;
886 
887   return is_better_edge;
888 }
889 
890 /* Connect traces in array TRACES, N_TRACES is the count of traces.  */
891 
892 static void
893 connect_traces (int n_traces, struct trace *traces)
894 {
895   int i;
896   bool *connected;
897   bool two_passes;
898   int last_trace;
899   int current_pass;
900   int current_partition;
901   int freq_threshold;
902   gcov_type count_threshold;
903 
904   freq_threshold = max_entry_frequency * DUPLICATION_THRESHOLD / 1000;
905   if (max_entry_count < INT_MAX / 1000)
906     count_threshold = max_entry_count * DUPLICATION_THRESHOLD / 1000;
907   else
908     count_threshold = max_entry_count / 1000 * DUPLICATION_THRESHOLD;
909 
910   connected = XCNEWVEC (bool, n_traces);
911   last_trace = -1;
912   current_pass = 1;
913   current_partition = BB_PARTITION (traces[0].first);
914   two_passes = false;
915 
916   if (flag_reorder_blocks_and_partition)
917     for (i = 0; i < n_traces && !two_passes; i++)
918       if (BB_PARTITION (traces[0].first)
919 	  != BB_PARTITION (traces[i].first))
920 	two_passes = true;
921 
922   for (i = 0; i < n_traces || (two_passes && current_pass == 1) ; i++)
923     {
924       int t = i;
925       int t2;
926       edge e, best;
927       int best_len;
928 
929       if (i >= n_traces)
930 	{
931 	  gcc_assert (two_passes && current_pass == 1);
932 	  i = 0;
933 	  t = i;
934 	  current_pass = 2;
935 	  if (current_partition == BB_HOT_PARTITION)
936 	    current_partition = BB_COLD_PARTITION;
937 	  else
938 	    current_partition = BB_HOT_PARTITION;
939 	}
940 
941       if (connected[t])
942 	continue;
943 
944       if (two_passes
945 	  && BB_PARTITION (traces[t].first) != current_partition)
946 	continue;
947 
948       connected[t] = true;
949 
950       /* Find the predecessor traces.  */
951       for (t2 = t; t2 > 0;)
952 	{
953 	  edge_iterator ei;
954 	  best = NULL;
955 	  best_len = 0;
956 	  FOR_EACH_EDGE (e, ei, traces[t2].first->preds)
957 	    {
958 	      int si = e->src->index;
959 
960 	      if (e->src != ENTRY_BLOCK_PTR
961 		  && (e->flags & EDGE_CAN_FALLTHRU)
962 		  && !(e->flags & EDGE_COMPLEX)
963 		  && bbd[si].end_of_trace >= 0
964 		  && !connected[bbd[si].end_of_trace]
965 		  && (BB_PARTITION (e->src) == current_partition)
966 		  && (!best
967 		      || e->probability > best->probability
968 		      || (e->probability == best->probability
969 			  && traces[bbd[si].end_of_trace].length > best_len)))
970 		{
971 		  best = e;
972 		  best_len = traces[bbd[si].end_of_trace].length;
973 		}
974 	    }
975 	  if (best)
976 	    {
977 	      best->src->aux = best->dest;
978 	      t2 = bbd[best->src->index].end_of_trace;
979 	      connected[t2] = true;
980 
981 	      if (dump_file)
982 		{
983 		  fprintf (dump_file, "Connection: %d %d\n",
984 			   best->src->index, best->dest->index);
985 		}
986 	    }
987 	  else
988 	    break;
989 	}
990 
991       if (last_trace >= 0)
992 	traces[last_trace].last->aux = traces[t2].first;
993       last_trace = t;
994 
995       /* Find the successor traces.  */
996       while (1)
997 	{
998 	  /* Find the continuation of the chain.  */
999 	  edge_iterator ei;
1000 	  best = NULL;
1001 	  best_len = 0;
1002 	  FOR_EACH_EDGE (e, ei, traces[t].last->succs)
1003 	    {
1004 	      int di = e->dest->index;
1005 
1006 	      if (e->dest != EXIT_BLOCK_PTR
1007 		  && (e->flags & EDGE_CAN_FALLTHRU)
1008 		  && !(e->flags & EDGE_COMPLEX)
1009 		  && bbd[di].start_of_trace >= 0
1010 		  && !connected[bbd[di].start_of_trace]
1011 		  && (BB_PARTITION (e->dest) == current_partition)
1012 		  && (!best
1013 		      || e->probability > best->probability
1014 		      || (e->probability == best->probability
1015 			  && traces[bbd[di].start_of_trace].length > best_len)))
1016 		{
1017 		  best = e;
1018 		  best_len = traces[bbd[di].start_of_trace].length;
1019 		}
1020 	    }
1021 
1022 	  if (best)
1023 	    {
1024 	      if (dump_file)
1025 		{
1026 		  fprintf (dump_file, "Connection: %d %d\n",
1027 			   best->src->index, best->dest->index);
1028 		}
1029 	      t = bbd[best->dest->index].start_of_trace;
1030 	      traces[last_trace].last->aux = traces[t].first;
1031 	      connected[t] = true;
1032 	      last_trace = t;
1033 	    }
1034 	  else
1035 	    {
1036 	      /* Try to connect the traces by duplicating one block.  */
1037 	      edge e2;
1038 	      basic_block next_bb = NULL;
1039 	      bool try_copy = false;
1040 
1041 	      FOR_EACH_EDGE (e, ei, traces[t].last->succs)
1042 		if (e->dest != EXIT_BLOCK_PTR
1043 		    && (e->flags & EDGE_CAN_FALLTHRU)
1044 		    && !(e->flags & EDGE_COMPLEX)
1045 		    && (!best || e->probability > best->probability))
1046 		  {
1047 		    edge_iterator ei;
1048 		    edge best2 = NULL;
1049 		    int best2_len = 0;
1050 
1051 		    /* If the destination is a start of a trace which is only
1052 		       one block long, then there is no need to search the successor
1053 		       blocks of the trace.  Accept it.  */
1054 		    if (bbd[e->dest->index].start_of_trace >= 0
1055 			&& traces[bbd[e->dest->index].start_of_trace].length
1056 			   == 1)
1057 		      {
1058 			best = e;
1059 			try_copy = true;
1060 			continue;
1061 		      }
1062 
1063 		    FOR_EACH_EDGE (e2, ei, e->dest->succs)
1064 		      {
1065 			int di = e2->dest->index;
1066 
1067 			if (e2->dest == EXIT_BLOCK_PTR
1068 			    || ((e2->flags & EDGE_CAN_FALLTHRU)
1069 				&& !(e2->flags & EDGE_COMPLEX)
1070 				&& bbd[di].start_of_trace >= 0
1071 				&& !connected[bbd[di].start_of_trace]
1072 				&& (BB_PARTITION (e2->dest) == current_partition)
1073 				&& (EDGE_FREQUENCY (e2) >= freq_threshold)
1074 				&& (e2->count >= count_threshold)
1075 				&& (!best2
1076 				    || e2->probability > best2->probability
1077 				    || (e2->probability == best2->probability
1078 					&& traces[bbd[di].start_of_trace].length
1079 					   > best2_len))))
1080 			  {
1081 			    best = e;
1082 			    best2 = e2;
1083 			    if (e2->dest != EXIT_BLOCK_PTR)
1084 			      best2_len = traces[bbd[di].start_of_trace].length;
1085 			    else
1086 			      best2_len = INT_MAX;
1087 			    next_bb = e2->dest;
1088 			    try_copy = true;
1089 			  }
1090 		      }
1091 		  }
1092 
1093 	      if (flag_reorder_blocks_and_partition)
1094 		try_copy = false;
1095 
1096 	      /* Copy tiny blocks always; copy larger blocks only when the
1097 		 edge is traversed frequently enough.  */
1098 	      if (try_copy
1099 		  && copy_bb_p (best->dest,
1100 				optimize_edge_for_speed_p (best)
1101 				&& EDGE_FREQUENCY (best) >= freq_threshold
1102 				&& best->count >= count_threshold))
1103 		{
1104 		  basic_block new_bb;
1105 
1106 		  if (dump_file)
1107 		    {
1108 		      fprintf (dump_file, "Connection: %d %d ",
1109 			       traces[t].last->index, best->dest->index);
1110 		      if (!next_bb)
1111 			fputc ('\n', dump_file);
1112 		      else if (next_bb == EXIT_BLOCK_PTR)
1113 			fprintf (dump_file, "exit\n");
1114 		      else
1115 			fprintf (dump_file, "%d\n", next_bb->index);
1116 		    }
1117 
1118 		  new_bb = copy_bb (best->dest, best, traces[t].last, t);
1119 		  traces[t].last = new_bb;
1120 		  if (next_bb && next_bb != EXIT_BLOCK_PTR)
1121 		    {
1122 		      t = bbd[next_bb->index].start_of_trace;
1123 		      traces[last_trace].last->aux = traces[t].first;
1124 		      connected[t] = true;
1125 		      last_trace = t;
1126 		    }
1127 		  else
1128 		    break;	/* Stop finding the successor traces.  */
1129 		}
1130 	      else
1131 		break;	/* Stop finding the successor traces.  */
1132 	    }
1133 	}
1134     }
1135 
1136   if (dump_file)
1137     {
1138       basic_block bb;
1139 
1140       fprintf (dump_file, "Final order:\n");
1141       for (bb = traces[0].first; bb; bb = (basic_block) bb->aux)
1142 	fprintf (dump_file, "%d ", bb->index);
1143       fprintf (dump_file, "\n");
1144       fflush (dump_file);
1145     }
1146 
1147   FREE (connected);
1148 }
1149 
1150 /* Return true when BB can and should be copied.  CODE_MAY_GROW is true
1151    when code size is allowed to grow by duplication.  */
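/* Roughly: the total minimum length of BB's insns may not exceed the length
   of one unconditional jump; with CODE_MAY_GROW and a block optimized for
   speed, that budget is multiplied by the max-grow-copy-bb-insns
   parameter.  */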
1152 
1153 static bool
1154 copy_bb_p (const_basic_block bb, int code_may_grow)
1155 {
1156   int size = 0;
1157   int max_size = uncond_jump_length;
1158   rtx insn;
1159 
1160   if (!bb->frequency)
1161     return false;
1162   if (EDGE_COUNT (bb->preds) < 2)
1163     return false;
1164   if (!can_duplicate_block_p (bb))
1165     return false;
1166 
1167   /* Avoid duplicating blocks which have many successors (PR/13430).  */
1168   if (EDGE_COUNT (bb->succs) > 8)
1169     return false;
1170 
1171   if (code_may_grow && optimize_bb_for_speed_p (bb))
1172     max_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);
1173 
1174   FOR_BB_INSNS (bb, insn)
1175     {
1176       if (INSN_P (insn))
1177 	size += get_attr_min_length (insn);
1178     }
1179 
1180   if (size <= max_size)
1181     return true;
1182 
1183   if (dump_file)
1184     {
1185       fprintf (dump_file,
1186 	       "Block %d can't be copied because its size = %d.\n",
1187 	       bb->index, size);
1188     }
1189 
1190   return false;
1191 }
1192 
1193 /* Return the length of an unconditional jump instruction.  */
1194 
1195 int
1196 get_uncond_jump_length (void)
1197 {
1198   rtx label, jump;
1199   int length;
1200 
1201   label = emit_label_before (gen_label_rtx (), get_insns ());
1202   jump = emit_jump_insn (gen_jump (label));
1203 
1204   length = get_attr_min_length (jump);
1205 
1206   delete_insn (jump);
1207   delete_insn (label);
1208   return length;
1209 }
1210 
1211 /* Emit a barrier into the footer of BB.  */
1212 
1213 static void
1214 emit_barrier_after_bb (basic_block bb)
1215 {
1216   rtx barrier = emit_barrier_after (BB_END (bb));
1217   bb->il.rtl->footer = unlink_insn_chain (barrier, barrier);
1218 }
1219 
1220 /* The landing pad OLD_LP, in block OLD_BB, has edges from both partitions.
1221    Duplicate the landing pad and split the edges so that no EH edge
1222    crosses partitions.  */
1223 
1224 static void
1225 fix_up_crossing_landing_pad (eh_landing_pad old_lp, basic_block old_bb)
1226 {
1227   eh_landing_pad new_lp;
1228   basic_block new_bb, last_bb, post_bb;
1229   rtx new_label, jump, post_label;
1230   unsigned new_partition;
1231   edge_iterator ei;
1232   edge e;
1233 
1234   /* Generate the new landing-pad structure.  */
1235   new_lp = gen_eh_landing_pad (old_lp->region);
1236   new_lp->post_landing_pad = old_lp->post_landing_pad;
1237   new_lp->landing_pad = gen_label_rtx ();
1238   LABEL_PRESERVE_P (new_lp->landing_pad) = 1;
1239 
1240   /* Put appropriate instructions in new bb.  */
1241   new_label = emit_label (new_lp->landing_pad);
1242 
1243   expand_dw2_landing_pad_for_region (old_lp->region);
1244 
1245   post_bb = BLOCK_FOR_INSN (old_lp->landing_pad);
1246   post_bb = single_succ (post_bb);
1247   post_label = block_label (post_bb);
1248   jump = emit_jump_insn (gen_jump (post_label));
1249   JUMP_LABEL (jump) = post_label;
1250 
1251   /* Create new basic block to be dest for lp.  */
1252   last_bb = EXIT_BLOCK_PTR->prev_bb;
1253   new_bb = create_basic_block (new_label, jump, last_bb);
1254   new_bb->aux = last_bb->aux;
1255   last_bb->aux = new_bb;
1256 
1257   emit_barrier_after_bb (new_bb);
1258 
1259   make_edge (new_bb, post_bb, 0);
1260 
1261   /* Make sure new bb is in the other partition.  */
1262   new_partition = BB_PARTITION (old_bb);
1263   new_partition ^= BB_HOT_PARTITION | BB_COLD_PARTITION;
1264   BB_SET_PARTITION (new_bb, new_partition);
1265 
1266   /* Fix up the edges.  */
1267   for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)) != NULL; )
1268     if (BB_PARTITION (e->src) == new_partition)
1269       {
1270 	rtx insn = BB_END (e->src);
1271 	rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
1272 
1273 	gcc_assert (note != NULL);
1274 	gcc_checking_assert (INTVAL (XEXP (note, 0)) == old_lp->index);
1275 	XEXP (note, 0) = GEN_INT (new_lp->index);
1276 
1277 	/* Adjust the edge to the new destination.  */
1278 	redirect_edge_succ (e, new_bb);
1279       }
1280     else
1281       ei_next (&ei);
1282 }
1283 
1284 /* Find the basic blocks that are rarely executed and need to be moved to
1285    a separate section of the .o file (to cut down on paging and improve
1286    cache locality).  Return a vector of all edges that cross.  */
1287 
1288 static VEC(edge, heap) *
1289 find_rarely_executed_basic_blocks_and_crossing_edges (void)
1290 {
1291   VEC(edge, heap) *crossing_edges = NULL;
1292   basic_block bb;
1293   edge e;
1294   edge_iterator ei;
1295 
1296   /* Mark which partition (hot/cold) each basic block belongs in.  */
1297   FOR_EACH_BB (bb)
1298     {
1299       if (probably_never_executed_bb_p (bb))
1300 	BB_SET_PARTITION (bb, BB_COLD_PARTITION);
1301       else
1302 	BB_SET_PARTITION (bb, BB_HOT_PARTITION);
1303     }
1304 
1305   /* The format of .gcc_except_table does not allow landing pads to
1306      be in a different partition from the throw.  Fix this by either
1307      moving or duplicating the landing pads.  */
1308   if (cfun->eh->lp_array)
1309     {
1310       unsigned i;
1311       eh_landing_pad lp;
1312 
1313       FOR_EACH_VEC_ELT (eh_landing_pad, cfun->eh->lp_array, i, lp)
1314 	{
1315 	  bool all_same, all_diff;
1316 
1317 	  if (lp == NULL
1318 	      || lp->landing_pad == NULL_RTX
1319 	      || !LABEL_P (lp->landing_pad))
1320 	    continue;
1321 
1322 	  all_same = all_diff = true;
1323 	  bb = BLOCK_FOR_INSN (lp->landing_pad);
1324 	  FOR_EACH_EDGE (e, ei, bb->preds)
1325 	    {
1326 	      gcc_assert (e->flags & EDGE_EH);
1327 	      if (BB_PARTITION (bb) == BB_PARTITION (e->src))
1328 		all_diff = false;
1329 	      else
1330 		all_same = false;
1331 	    }
1332 
1333 	  if (all_same)
1334 	    ;
1335 	  else if (all_diff)
1336 	    {
1337 	      int which = BB_PARTITION (bb);
1338 	      which ^= BB_HOT_PARTITION | BB_COLD_PARTITION;
1339 	      BB_SET_PARTITION (bb, which);
1340 	    }
1341 	  else
1342 	    fix_up_crossing_landing_pad (lp, bb);
1343 	}
1344     }
1345 
1346   /* Mark every edge that crosses between sections.  */
1347 
1348   FOR_EACH_BB (bb)
1349     FOR_EACH_EDGE (e, ei, bb->succs)
1350       {
1351 	unsigned int flags = e->flags;
1352 
1353         /* We should never have EDGE_CROSSING set yet.  */
1354 	gcc_checking_assert ((flags & EDGE_CROSSING) == 0);
1355 
1356 	if (e->src != ENTRY_BLOCK_PTR
1357 	    && e->dest != EXIT_BLOCK_PTR
1358 	    && BB_PARTITION (e->src) != BB_PARTITION (e->dest))
1359 	  {
1360 	    VEC_safe_push (edge, heap, crossing_edges, e);
1361 	    flags |= EDGE_CROSSING;
1362 	  }
1363 
1364 	/* Now that we've split eh edges as appropriate, allow landing pads
1365 	   to be merged with the post-landing pads.  */
1366 	flags &= ~EDGE_PRESERVE;
1367 
1368 	e->flags = flags;
1369       }
1370 
1371   return crossing_edges;
1372 }
1373 
1374 /* If any destination of a crossing edge does not have a label, add a label;
1375    convert any easy fall-through crossing edges to unconditional jumps.  */
1376 
1377 static void
1378 add_labels_and_missing_jumps (VEC(edge, heap) *crossing_edges)
1379 {
1380   size_t i;
1381   edge e;
1382 
1383   FOR_EACH_VEC_ELT (edge, crossing_edges, i, e)
1384     {
1385       basic_block src = e->src;
1386       basic_block dest = e->dest;
1387       rtx label, new_jump;
1388 
1389       if (dest == EXIT_BLOCK_PTR)
1390 	continue;
1391 
1392       /* Make sure dest has a label.  */
1393       label = block_label (dest);
1394 
1395       /* Nothing to do for non-fallthru edges.  */
1396       if (src == ENTRY_BLOCK_PTR)
1397 	continue;
1398       if ((e->flags & EDGE_FALLTHRU) == 0)
1399 	continue;
1400 
1401       /* If the block does not end with a control flow insn, then we
1402 	 can trivially add a jump to the end to fix up the crossing.
1403 	 Otherwise the jump will have to go in a new bb, which will
1404 	 be handled by the fix_up_fall_thru_edges function.  */
1405       if (control_flow_insn_p (BB_END (src)))
1406 	continue;
1407 
1408       /* Make sure there's only one successor.  */
1409       gcc_assert (single_succ_p (src));
1410 
1411       new_jump = emit_jump_insn_after (gen_jump (label), BB_END (src));
1412       BB_END (src) = new_jump;
1413       JUMP_LABEL (new_jump) = label;
1414       LABEL_NUSES (label) += 1;
1415 
1416       emit_barrier_after_bb (src);
1417 
1418       /* Mark edge as non-fallthru.  */
1419       e->flags &= ~EDGE_FALLTHRU;
1420     }
1421 }
1422 
1423 /* Find any bb's where the fall-through edge is a crossing edge (note that
1424    these bb's must also contain a conditional jump or end with a call
1425    instruction; we've already dealt with fall-through edges for blocks
1426    that didn't have a conditional jump or didn't end with a call instruction
1427    in the call to add_labels_and_missing_jumps).  Convert the fall-through
1428    edge to a non-crossing edge by inserting a new bb to fall through into.
1429    The new bb will contain an unconditional jump (crossing edge) to the
1430    original fall through destination.  */
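/* Schematically, a crossing fall-through

     cur_bb  --(fallthru, crossing)-->  dest

   becomes

     cur_bb  --(fallthru)-->  new_bb  --(jump, crossing)-->  dest

   where new_bb stays in cur_bb's partition.  */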
1431 
1432 static void
1433 fix_up_fall_thru_edges (void)
1434 {
1435   basic_block cur_bb;
1436   basic_block new_bb;
1437   edge succ1;
1438   edge succ2;
1439   edge fall_thru;
1440   edge cond_jump = NULL;
1441   edge e;
1442   bool cond_jump_crosses;
1443   int invert_worked;
1444   rtx old_jump;
1445   rtx fall_thru_label;
1446 
1447   FOR_EACH_BB (cur_bb)
1448     {
1449       fall_thru = NULL;
1450       if (EDGE_COUNT (cur_bb->succs) > 0)
1451 	succ1 = EDGE_SUCC (cur_bb, 0);
1452       else
1453 	succ1 = NULL;
1454 
1455       if (EDGE_COUNT (cur_bb->succs) > 1)
1456 	succ2 = EDGE_SUCC (cur_bb, 1);
1457       else
1458 	succ2 = NULL;
1459 
1460       /* Find the fall-through edge.  */
1461 
1462       if (succ1
1463 	  && (succ1->flags & EDGE_FALLTHRU))
1464 	{
1465 	  fall_thru = succ1;
1466 	  cond_jump = succ2;
1467 	}
1468       else if (succ2
1469 	       && (succ2->flags & EDGE_FALLTHRU))
1470 	{
1471 	  fall_thru = succ2;
1472 	  cond_jump = succ1;
1473 	}
1474       else if (succ1
1475 	       && (block_ends_with_call_p (cur_bb)
1476 		   || can_throw_internal (BB_END (cur_bb))))
1477 	{
1478 	  edge e;
1479 	  edge_iterator ei;
1480 
1481 	  /* Find EDGE_CAN_FALLTHRU edge.  */
1482 	  FOR_EACH_EDGE (e, ei, cur_bb->succs)
1483 	    if (e->flags & EDGE_CAN_FALLTHRU)
1484 	      {
1485 		fall_thru = e;
1486 		break;
1487 	      }
1488 	}
1489 
1490       if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR))
1491 	{
1492 	  /* Check to see if the fall-thru edge is a crossing edge.  */
1493 
1494 	  if (fall_thru->flags & EDGE_CROSSING)
1495 	    {
1496 	      /* The fall_thru edge crosses; now check the cond jump edge, if
1497 		 it exists.  */
1498 
1499 	      cond_jump_crosses = true;
1500 	      invert_worked = 0;
1501 	      old_jump = BB_END (cur_bb);
1502 
1503 	      /* Find the jump instruction, if there is one.  */
1504 
1505 	      if (cond_jump)
1506 		{
1507 		  if (!(cond_jump->flags & EDGE_CROSSING))
1508 		    cond_jump_crosses = false;
1509 
1510 		  /* We know the fall-thru edge crosses; if the cond
1511 		     jump edge does NOT cross, and its destination is the
1512 		     next block in the bb order, invert the jump
1513 		     (i.e. fix it so the fall thru does not cross and
1514 		     the cond jump does).  */
1515 
1516 		  if (!cond_jump_crosses
1517 		      && cur_bb->aux == cond_jump->dest)
1518 		    {
1519 		      /* Find label in fall_thru block. We've already added
1520 			 any missing labels, so there must be one.  */
1521 
1522 		      fall_thru_label = block_label (fall_thru->dest);
1523 
1524 		      if (old_jump && JUMP_P (old_jump) && fall_thru_label)
1525 			invert_worked = invert_jump (old_jump,
1526 						     fall_thru_label,0);
1527 		      if (invert_worked)
1528 			{
1529 			  fall_thru->flags &= ~EDGE_FALLTHRU;
1530 			  cond_jump->flags |= EDGE_FALLTHRU;
1531 			  update_br_prob_note (cur_bb);
1532 			  e = fall_thru;
1533 			  fall_thru = cond_jump;
1534 			  cond_jump = e;
1535 			  cond_jump->flags |= EDGE_CROSSING;
1536 			  fall_thru->flags &= ~EDGE_CROSSING;
1537 			}
1538 		    }
1539 		}
1540 
1541 	      if (cond_jump_crosses || !invert_worked)
1542 		{
1543 		  /* This is the case where both edges out of the basic
1544 		     block are crossing edges. Here we will fix up the
1545 		     fall through edge. The jump edge will be taken care
1546 		     of later.  The EDGE_CROSSING flag of fall_thru edge
1547                      is unset before the call to force_nonfallthru
1548                      function because if a new basic-block is created
1549                      this edge remains in the current section boundary
1550                      while the edge between new_bb and the fall_thru->dest
1551                      becomes EDGE_CROSSING.  */
1552 
1553                   fall_thru->flags &= ~EDGE_CROSSING;
1554 		  new_bb = force_nonfallthru (fall_thru);
1555 
1556 		  if (new_bb)
1557 		    {
1558 		      new_bb->aux = cur_bb->aux;
1559 		      cur_bb->aux = new_bb;
1560 
1561 		      /* Make sure new fall-through bb is in same
1562 			 partition as bb it's falling through from.  */
1563 
1564 		      BB_COPY_PARTITION (new_bb, cur_bb);
1565 		      single_succ_edge (new_bb)->flags |= EDGE_CROSSING;
1566 		    }
1567                   else
1568                     {
1569                       /* If a new basic-block was not created; restore
1570                          the EDGE_CROSSING flag.  */
1571                       fall_thru->flags |= EDGE_CROSSING;
1572                     }
1573 
1574 		  /* Add a barrier after the new jump.  */
1575 		  emit_barrier_after_bb (new_bb ? new_bb : cur_bb);
1576 		}
1577 	    }
1578 	}
1579     }
1580 }
1581 
1582 /* This function checks the destination block of a "crossing jump" to
1583    see if it has any crossing predecessors that begin with a code label
1584    and end with an unconditional jump.  If so, it returns that predecessor
1585    block.  (This is to avoid creating lots of new basic blocks that all
1586    contain unconditional jumps to the same destination).  */
1587 
1588 static basic_block
1589 find_jump_block (basic_block jump_dest)
1590 {
1591   basic_block source_bb = NULL;
1592   edge e;
1593   rtx insn;
1594   edge_iterator ei;
1595 
1596   FOR_EACH_EDGE (e, ei, jump_dest->preds)
1597     if (e->flags & EDGE_CROSSING)
1598       {
1599 	basic_block src = e->src;
1600 
1601 	/* Check each predecessor to see if it has a label, and contains
1602 	   only one executable instruction, which is an unconditional jump.
1603 	   If so, we can use it.  */
1604 
1605 	if (LABEL_P (BB_HEAD (src)))
1606 	  for (insn = BB_HEAD (src);
1607 	       !INSN_P (insn) && insn != NEXT_INSN (BB_END (src));
1608 	       insn = NEXT_INSN (insn))
1609 	    {
1610 	      if (INSN_P (insn)
1611 		  && insn == BB_END (src)
1612 		  && JUMP_P (insn)
1613 		  && !any_condjump_p (insn))
1614 		{
1615 		  source_bb = src;
1616 		  break;
1617 		}
1618 	    }
1619 
1620 	if (source_bb)
1621 	  break;
1622       }
1623 
1624   return source_bb;
1625 }
1626 
1627 /* Find all BB's with conditional jumps that are crossing edges;
1628    insert a new bb and make the conditional jump branch to the new
1629    bb instead (make the new bb the same color so the conditional branch won't
1630    be a 'crossing' edge).  Insert an unconditional jump from the
1631    new bb to the original destination of the conditional jump.  */
1632 
1633 static void
1634 fix_crossing_conditional_branches (void)
1635 {
1636   basic_block cur_bb;
1637   basic_block new_bb;
1638   basic_block dest;
1639   edge succ1;
1640   edge succ2;
1641   edge crossing_edge;
1642   edge new_edge;
1643   rtx old_jump;
1644   rtx set_src;
1645   rtx old_label = NULL_RTX;
1646   rtx new_label;
1647 
1648   FOR_EACH_BB (cur_bb)
1649     {
1650       crossing_edge = NULL;
1651       if (EDGE_COUNT (cur_bb->succs) > 0)
1652 	succ1 = EDGE_SUCC (cur_bb, 0);
1653       else
1654 	succ1 = NULL;
1655 
1656       if (EDGE_COUNT (cur_bb->succs) > 1)
1657 	succ2 = EDGE_SUCC (cur_bb, 1);
1658       else
1659 	succ2 = NULL;
1660 
1661       /* We already took care of fall-through edges, so only one successor
1662 	 can be a crossing edge.  */
1663 
1664       if (succ1 && (succ1->flags & EDGE_CROSSING))
1665 	crossing_edge = succ1;
1666       else if (succ2 && (succ2->flags & EDGE_CROSSING))
1667 	crossing_edge = succ2;
1668 
1669       if (crossing_edge)
1670 	{
1671 	  old_jump = BB_END (cur_bb);
1672 
1673 	  /* Check to make sure the jump instruction is a
1674 	     conditional jump.  */
1675 
1676 	  set_src = NULL_RTX;
1677 
1678 	  if (any_condjump_p (old_jump))
1679 	    {
1680 	      if (GET_CODE (PATTERN (old_jump)) == SET)
1681 		set_src = SET_SRC (PATTERN (old_jump));
1682 	      else if (GET_CODE (PATTERN (old_jump)) == PARALLEL)
1683 		{
1684 		  set_src = XVECEXP (PATTERN (old_jump), 0,0);
1685 		  if (GET_CODE (set_src) == SET)
1686 		    set_src = SET_SRC (set_src);
1687 		  else
1688 		    set_src = NULL_RTX;
1689 		}
1690 	    }
1691 
1692 	  if (set_src && (GET_CODE (set_src) == IF_THEN_ELSE))
1693 	    {
1694 	      if (GET_CODE (XEXP (set_src, 1)) == PC)
1695 		old_label = XEXP (set_src, 2);
1696 	      else if (GET_CODE (XEXP (set_src, 2)) == PC)
1697 		old_label = XEXP (set_src, 1);
1698 
1699 	      /* Check to see if new bb for jumping to that dest has
1700 		 already been created; if so, use it; if not, create
1701 		 a new one.  */
1702 
1703 	      new_bb = find_jump_block (crossing_edge->dest);
1704 
1705 	      if (new_bb)
1706 		new_label = block_label (new_bb);
1707 	      else
1708 		{
1709 		  basic_block last_bb;
1710 		  rtx new_jump;
1711 
1712 		  /* Create new basic block to be dest for
1713 		     conditional jump.  */
1714 
1715 		  /* Put appropriate instructions in new bb.  */
1716 
1717 		  new_label = gen_label_rtx ();
1718 		  emit_label (new_label);
1719 
1720 		  gcc_assert (GET_CODE (old_label) == LABEL_REF);
1721 		  old_label = JUMP_LABEL (old_jump);
1722 		  new_jump = emit_jump_insn (gen_jump (old_label));
1723 		  JUMP_LABEL (new_jump) = old_label;
1724 
1725 		  last_bb = EXIT_BLOCK_PTR->prev_bb;
1726 		  new_bb = create_basic_block (new_label, new_jump, last_bb);
1727 		  new_bb->aux = last_bb->aux;
1728 		  last_bb->aux = new_bb;
1729 
1730 		  emit_barrier_after_bb (new_bb);
1731 
1732 		  /* Make sure new bb is in same partition as source
1733 		     of conditional branch.  */
1734 		  BB_COPY_PARTITION (new_bb, cur_bb);
1735 		}
1736 
1737 	      /* Make old jump branch to new bb.  */
1738 
1739 	      redirect_jump (old_jump, new_label, 0);
1740 
1741 	      /* Remove crossing_edge as predecessor of 'dest'.  */
1742 
1743 	      dest = crossing_edge->dest;
1744 
1745 	      redirect_edge_succ (crossing_edge, new_bb);
1746 
1747 	      /* Make a new edge from new_bb to old dest; new edge
1748 		 will be a successor for new_bb and a predecessor
1749 		 for 'dest'.  */
1750 
1751 	      if (EDGE_COUNT (new_bb->succs) == 0)
1752 		new_edge = make_edge (new_bb, dest, 0);
1753 	      else
1754 		new_edge = EDGE_SUCC (new_bb, 0);
1755 
1756 	      crossing_edge->flags &= ~EDGE_CROSSING;
1757 	      new_edge->flags |= EDGE_CROSSING;
1758 	    }
1759 	}
1760     }
1761 }
1762 
1763 /* Find any unconditional branches that cross between hot and cold
1764    sections.  Convert them into indirect jumps instead.  */
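
/* For illustration (a sketch; the actual insns are target-dependent):
   a crossing unconditional jump

	jmp .L_cold

   is replaced by loading the label address into a fresh pseudo
   register and jumping through it

	r = &.L_cold;
	goto *r;

   so the branch no longer needs to span the distance between the
   sections directly.  */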
1765 
1766 static void
1767 fix_crossing_unconditional_branches (void)
1768 {
1769   basic_block cur_bb;
1770   rtx last_insn;
1771   rtx label;
1772   rtx label_addr;
1773   rtx indirect_jump_sequence;
1774   rtx jump_insn = NULL_RTX;
1775   rtx new_reg;
1776   rtx cur_insn;
1777   edge succ;
1778 
1779   FOR_EACH_BB (cur_bb)
1780     {
1781       last_insn = BB_END (cur_bb);
1782 
1783       if (EDGE_COUNT (cur_bb->succs) < 1)
1784 	continue;
1785 
1786       succ = EDGE_SUCC (cur_bb, 0);
1787 
1788       /* Check to see if bb ends in a crossing (unconditional) jump.  At
1789 	 this point, no crossing jumps should be conditional.  */
1790 
1791       if (JUMP_P (last_insn)
1792 	  && (succ->flags & EDGE_CROSSING))
1793 	{
1794 	  rtx label2, table;
1795 
1796 	  gcc_assert (!any_condjump_p (last_insn));
1797 
1798 	  /* Make sure the jump is not already an indirect or table jump.  */
1799 
1800 	  if (!computed_jump_p (last_insn)
1801 	      && !tablejump_p (last_insn, &label2, &table))
1802 	    {
	      /* We have found a "crossing" unconditional branch.  Now
		 we must convert it to an indirect jump.  First create a
		 reference to the label, as the target for the jump.  */
1806 
1807 	      label = JUMP_LABEL (last_insn);
1808 	      label_addr = gen_rtx_LABEL_REF (Pmode, label);
1809 	      LABEL_NUSES (label) += 1;
1810 
1811 	      /* Get a register to use for the indirect jump.  */
1812 
1813 	      new_reg = gen_reg_rtx (Pmode);
1814 
	      /* Generate the indirect jump sequence.  */
1816 
1817 	      start_sequence ();
1818 	      emit_move_insn (new_reg, label_addr);
1819 	      emit_indirect_jump (new_reg);
1820 	      indirect_jump_sequence = get_insns ();
1821 	      end_sequence ();
1822 
1823 	      /* Make sure every instruction in the new jump sequence has
1824 		 its basic block set to be cur_bb.  */
1825 
1826 	      for (cur_insn = indirect_jump_sequence; cur_insn;
1827 		   cur_insn = NEXT_INSN (cur_insn))
1828 		{
1829 		  if (!BARRIER_P (cur_insn))
1830 		    BLOCK_FOR_INSN (cur_insn) = cur_bb;
1831 		  if (JUMP_P (cur_insn))
1832 		    jump_insn = cur_insn;
1833 		}
1834 
1835 	      /* Insert the new (indirect) jump sequence immediately before
1836 		 the unconditional jump, then delete the unconditional jump.  */
1837 
1838 	      emit_insn_before (indirect_jump_sequence, last_insn);
1839 	      delete_insn (last_insn);
1840 
1841 	      /* Make BB_END for cur_bb be the jump instruction (NOT the
1842 		 barrier instruction at the end of the sequence...).  */
1843 
1844 	      BB_END (cur_bb) = jump_insn;
1845 	    }
1846 	}
1847     }
1848 }
1849 
/* Add a REG_CROSSING_JUMP note to all crossing jump insns.  */
1851 
1852 static void
1853 add_reg_crossing_jump_notes (void)
1854 {
1855   basic_block bb;
1856   edge e;
1857   edge_iterator ei;
1858 
1859   FOR_EACH_BB (bb)
1860     FOR_EACH_EDGE (e, ei, bb->succs)
1861       if ((e->flags & EDGE_CROSSING)
1862 	  && JUMP_P (BB_END (e->src)))
1863 	add_reg_note (BB_END (e->src), REG_CROSSING_JUMP, NULL_RTX);
1864 }
1865 
1866 /* Verify, in the basic block chain, that there is at most one switch
1867    between hot/cold partitions. This is modelled on
1868    rtl_verify_flow_info_1, but it cannot go inside that function
1869    because this condition will not be true until after
1870    reorder_basic_blocks is called.  */
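
/* For example, the partition sequence hot-hot-cold-cold has the single
   transition we expect after reordering, whereas hot-cold-hot would
   trigger the "multiple hot/cold transitions" error below.  */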
1871 
1872 static void
1873 verify_hot_cold_block_grouping (void)
1874 {
1875   basic_block bb;
1876   int err = 0;
1877   bool switched_sections = false;
1878   int current_partition = 0;
1879 
1880   FOR_EACH_BB (bb)
1881     {
1882       if (!current_partition)
1883 	current_partition = BB_PARTITION (bb);
1884       if (BB_PARTITION (bb) != current_partition)
1885 	{
1886 	  if (switched_sections)
1887 	    {
1888 	      error ("multiple hot/cold transitions found (bb %i)",
1889 		     bb->index);
1890 	      err = 1;
1891 	    }
1892 	  else
1893 	    {
1894 	      switched_sections = true;
1895 	      current_partition = BB_PARTITION (bb);
1896 	    }
1897 	}
1898     }
1899 
  gcc_assert (!err);
1901 }
1902 
/* Reorder basic blocks.  The main entry point to this file.  The
   caller is expected to have put the function into cfglayout mode
   already, via cfg_layout_initialize ().  */
1905 
1906 void
1907 reorder_basic_blocks (void)
1908 {
1909   int n_traces;
1910   int i;
1911   struct trace *traces;
1912 
1913   gcc_assert (current_ir_type () == IR_RTL_CFGLAYOUT);
1914 
1915   if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
1916     return;
1917 
1918   set_edge_can_fallthru_flag ();
1919   mark_dfs_back_edges ();
1920 
  /* We estimate the length of an uncond jump insn only once, since the
     code for getting the insn length always returns the minimal length
     now.  */
1923   if (uncond_jump_length == 0)
1924     uncond_jump_length = get_uncond_jump_length ();
1925 
1926   /* We need to know some information for each basic block.  */
1927   array_size = GET_ARRAY_SIZE (last_basic_block);
1928   bbd = XNEWVEC (bbro_basic_block_data, array_size);
1929   for (i = 0; i < array_size; i++)
1930     {
1931       bbd[i].start_of_trace = -1;
1932       bbd[i].in_trace = -1;
1933       bbd[i].end_of_trace = -1;
1934       bbd[i].heap = NULL;
1935       bbd[i].node = NULL;
1936     }
1937 
1938   traces = XNEWVEC (struct trace, n_basic_blocks);
1939   n_traces = 0;
1940   find_traces (&n_traces, traces);
1941   connect_traces (n_traces, traces);
1942   FREE (traces);
1943   FREE (bbd);
1944 
1945   relink_block_chain (/*stay_in_cfglayout_mode=*/true);
1946 
1947   if (dump_file)
1948     dump_flow_info (dump_file, dump_flags);
1949 
1950   if (flag_reorder_blocks_and_partition)
1951     verify_hot_cold_block_grouping ();
1952 }
1953 
1954 /* Determine which partition the first basic block in the function
1955    belongs to, then find the first basic block in the current function
1956    that belongs to a different section, and insert a
1957    NOTE_INSN_SWITCH_TEXT_SECTIONS note immediately before it in the
1958    instruction stream.  When writing out the assembly code,
1959    encountering this note will make the compiler switch between the
1960    hot and cold text sections.  */
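
/* For example (with made-up block numbers), given the layout

	hot: bb2 bb3 | cold: bb7 bb8

   the note is emitted immediately before BB_HEAD (bb7), i.e. between
   bb3 and bb7 in the insn stream.  */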
1961 
1962 static void
1963 insert_section_boundary_note (void)
1964 {
1965   basic_block bb;
1966   rtx new_note;
1967   int first_partition = 0;
1968 
1969   if (!flag_reorder_blocks_and_partition)
1970     return;
1971 
1972   FOR_EACH_BB (bb)
1973     {
1974       if (!first_partition)
1975 	first_partition = BB_PARTITION (bb);
1976       if (BB_PARTITION (bb) != first_partition)
1977 	{
1978 	  new_note = emit_note_before (NOTE_INSN_SWITCH_TEXT_SECTIONS,
1979 				       BB_HEAD (bb));
1980 	  /* ??? This kind of note always lives between basic blocks,
1981 	     but add_insn_before will set BLOCK_FOR_INSN anyway.  */
1982 	  BLOCK_FOR_INSN (new_note) = NULL;
1983 	  break;
1984 	}
1985     }
1986 }
1987 
/* Duplicate the blocks containing computed gotos.  This basically unfactors
   computed gotos that were factored early on in the compilation process to
   speed up edge-based data flow.  Leaving them factored can seriously
   pessimize code with many computed jumps in the source code, such as
   interpreters.  See e.g. PR15242.  */
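
/* For illustration (a sketch; block names are made up): with a factored
   dispatch block

	bb1: ...; goto bb3;
	bb2: ...; goto bb3;
	bb3: goto *addr;

   unfactoring gives each predecessor its own copy of the dispatch

	bb1: ...; goto *addr;
	bb2: ...; goto *addr;

   so each indirect jump can be predicted separately, which is what
   helps code like interpreters (see PR15242 above).  */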
1993 
1994 static bool
1995 gate_duplicate_computed_gotos (void)
1996 {
1997   if (targetm.cannot_modify_jumps_p ())
1998     return false;
1999   return (optimize > 0
2000 	  && flag_expensive_optimizations
2001 	  && ! optimize_function_for_size_p (cfun));
2002 }
2003 
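/* Unfactor computed gotos: for each small candidate block ending in a
   computed jump, give suitable predecessors their own copy of it.  */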
2004 
2005 static unsigned int
2006 duplicate_computed_gotos (void)
2007 {
2008   basic_block bb, new_bb;
2009   bitmap candidates;
2010   int max_size;
2011 
2012   if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
2013     return 0;
2014 
2015   cfg_layout_initialize (0);
2016 
  /* We estimate the length of an uncond jump insn only once, since
     the code for getting the insn length always returns the minimal
     length now.  */
2020   if (uncond_jump_length == 0)
2021     uncond_jump_length = get_uncond_jump_length ();
2022 
2023   max_size = uncond_jump_length * PARAM_VALUE (PARAM_MAX_GOTO_DUPLICATION_INSNS);
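  /* E.g. with uncond_jump_length == 4 and the param set to 8, candidate
     blocks longer than 32 length-units are rejected below; the numbers
     are only illustrative.  */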
2024   candidates = BITMAP_ALLOC (NULL);
2025 
  /* Look for blocks that end in a computed jump, and see if such blocks
     are suitable for unfactoring.  If a block is a candidate for
     unfactoring, mark it in the candidates bitmap.  */
2029   FOR_EACH_BB (bb)
2030     {
2031       rtx insn;
2032       edge e;
2033       edge_iterator ei;
2034       int size, all_flags;
2035 
2036       /* Build the reorder chain for the original order of blocks.  */
2037       if (bb->next_bb != EXIT_BLOCK_PTR)
2038 	bb->aux = bb->next_bb;
2039 
2040       /* Obviously the block has to end in a computed jump.  */
2041       if (!computed_jump_p (BB_END (bb)))
2042 	continue;
2043 
2044       /* Only consider blocks that can be duplicated.  */
2045       if (find_reg_note (BB_END (bb), REG_CROSSING_JUMP, NULL_RTX)
2046 	  || !can_duplicate_block_p (bb))
2047 	continue;
2048 
2049       /* Make sure that the block is small enough.  */
2050       size = 0;
2051       FOR_BB_INSNS (bb, insn)
2052 	if (INSN_P (insn))
2053 	  {
2054 	    size += get_attr_min_length (insn);
2055 	    if (size > max_size)
2056 	       break;
2057 	  }
2058       if (size > max_size)
2059 	continue;
2060 
2061       /* Final check: there must not be any incoming abnormal edges.  */
2062       all_flags = 0;
2063       FOR_EACH_EDGE (e, ei, bb->preds)
2064 	all_flags |= e->flags;
2065       if (all_flags & EDGE_COMPLEX)
2066 	continue;
2067 
2068       bitmap_set_bit (candidates, bb->index);
2069     }
2070 
  /* Nothing to do if no suitable computed-jump candidates were found.  */
2072   if (bitmap_empty_p (candidates))
2073     goto done;
2074 
2075   /* Duplicate computed gotos.  */
2076   FOR_EACH_BB (bb)
2077     {
2078       if (bb->il.rtl->visited)
2079 	continue;
2080 
2081       bb->il.rtl->visited = 1;
2082 
2083       /* BB must have one outgoing edge.  That edge must not lead to
2084 	 the exit block or the next block.
2085 	 The destination must have more than one predecessor.  */
2086       if (!single_succ_p (bb)
2087 	  || single_succ (bb) == EXIT_BLOCK_PTR
2088 	  || single_succ (bb) == bb->next_bb
2089 	  || single_pred_p (single_succ (bb)))
2090 	continue;
2091 
2092       /* The successor block has to be a duplication candidate.  */
2093       if (!bitmap_bit_p (candidates, single_succ (bb)->index))
2094 	continue;
2095 
2096       new_bb = duplicate_block (single_succ (bb), single_succ_edge (bb), bb);
2097       new_bb->aux = bb->aux;
2098       bb->aux = new_bb;
2099       new_bb->il.rtl->visited = 1;
2100     }
2101 
2102 done:
2103   cfg_layout_finalize ();
2104 
2105   BITMAP_FREE (candidates);
2106   return 0;
2107 }
2108 
2109 struct rtl_opt_pass pass_duplicate_computed_gotos =
2110 {
2111  {
2112   RTL_PASS,
2113   "compgotos",                          /* name */
2114   gate_duplicate_computed_gotos,        /* gate */
2115   duplicate_computed_gotos,             /* execute */
2116   NULL,                                 /* sub */
2117   NULL,                                 /* next */
2118   0,                                    /* static_pass_number */
2119   TV_REORDER_BLOCKS,                    /* tv_id */
2120   0,                                    /* properties_required */
2121   0,                                    /* properties_provided */
2122   0,                                    /* properties_destroyed */
2123   0,                                    /* todo_flags_start */
  TODO_verify_rtl_sharing,              /* todo_flags_finish */
2125  }
2126 };
2127 
2128 
2129 /* This function is the main 'entrance' for the optimization that
2130    partitions hot and cold basic blocks into separate sections of the
2131    .o file (to improve performance and cache locality).  Ideally it
2132    would be called after all optimizations that rearrange the CFG have
2133    been called.  However part of this optimization may introduce new
2134    register usage, so it must be called before register allocation has
2135    occurred.  This means that this optimization is actually called
2136    well before the optimization that reorders basic blocks (see
2137    function above).
2138 
2139    This optimization checks the feedback information to determine
2140    which basic blocks are hot/cold, updates flags on the basic blocks
2141    to indicate which section they belong in.  This information is
2142    later used for writing out sections in the .o file.  Because hot
2143    and cold sections can be arbitrarily large (within the bounds of
2144    memory), far beyond the size of a single function, it is necessary
2145    to fix up all edges that cross section boundaries, to make sure the
2146    instructions used can actually span the required distance.  The
2147    fixes are described below.
2148 
2149    Fall-through edges must be changed into jumps; it is not safe or
2150    legal to fall through across a section boundary.  Whenever a
2151    fall-through edge crossing a section boundary is encountered, a new
2152    basic block is inserted (in the same section as the fall-through
2153    source), and the fall through edge is redirected to the new basic
2154    block.  The new basic block contains an unconditional jump to the
   original fall-through target.  (If the unconditional jump is
   insufficient to cross section boundaries, that is dealt with a
   little later; see below.)
2158 
2159    In order to deal with architectures that have short conditional
2160    branches (which cannot span all of memory) we take any conditional
2161    jump that attempts to cross a section boundary and add a level of
2162    indirection: it becomes a conditional jump to a new basic block, in
2163    the same section.  The new basic block contains an unconditional
2164    jump to the original target, in the other section.
2165 
2166    For those architectures whose unconditional branch is also
2167    incapable of reaching all of memory, those unconditional jumps are
2168    converted into indirect jumps, through a register.
2169 
2170    IMPORTANT NOTE: This optimization causes some messy interactions
2171    with the cfg cleanup optimizations; those optimizations want to
2172    merge blocks wherever possible, and to collapse indirect jump
2173    sequences (change "A jumps to B jumps to C" directly into "A jumps
2174    to C").  Those optimizations can undo the jump fixes that
2175    partitioning is required to make (see above), in order to ensure
2176    that jumps attempting to cross section boundaries are really able
2177    to cover whatever distance the jump requires (on many architectures
2178    conditional or unconditional jumps are not able to reach all of
   memory).  Therefore tests have to be inserted into each such
   optimization to make sure that it does not undo the fixes that
   partitioning requires.  This would be much less of a problem
2182    if we could perform this optimization later in the compilation, but
2183    unfortunately the fact that we may need to create indirect jumps
2184    (through registers) requires that this optimization be performed
2185    before register allocation.
2186 
2187    Hot and cold basic blocks are partitioned and put in separate
2188    sections of the .o file, to reduce paging and improve cache
2189    performance (hopefully).  This can result in bits of code from the
   same function being widely separated in the .o file.  However this
   is not apparent from the current bb structure.  Therefore we must
   take care to ensure that: 1) there are no fall_thru edges that
   cross between sections; 2) for those architectures which have
   "short" conditional branches, all conditional branches that attempt
   to cross between sections are converted to unconditional branches;
   and 3) for those architectures which have "short" unconditional
   branches, all unconditional branches that attempt to cross between
   sections are converted to indirect jumps.
2199 
2200    The code for fixing up fall_thru edges that cross between hot and
2201    cold basic blocks does so by creating new basic blocks containing
2202    unconditional branches to the appropriate label in the "other"
2203    section.  The new basic block is then put in the same (hot or cold)
2204    section as the original conditional branch, and the fall_thru edge
2205    is modified to fall into the new basic block instead.  By adding
2206    this level of indirection we end up with only unconditional branches
2207    crossing between hot and cold sections.
2208 
2209    Conditional branches are dealt with by adding a level of indirection.
2210    A new basic block is added in the same (hot/cold) section as the
2211    conditional branch, and the conditional branch is retargeted to the
2212    new basic block.  The new basic block contains an unconditional branch
2213    to the original target of the conditional branch (in the other section).
2214 
2215    Unconditional branches are dealt with by converting them into
2216    indirect jumps.  */
2217 
2218 static unsigned
2219 partition_hot_cold_basic_blocks (void)
2220 {
2221   VEC(edge, heap) *crossing_edges;
2222 
2223   if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
2224     return 0;
2225 
2226   df_set_flags (DF_DEFER_INSN_RESCAN);
2227 
2228   crossing_edges = find_rarely_executed_basic_blocks_and_crossing_edges ();
2229   if (crossing_edges == NULL)
2230     return 0;
2231 
2232   /* Make sure the source of any crossing edge ends in a jump and the
2233      destination of any crossing edge has a label.  */
2234   add_labels_and_missing_jumps (crossing_edges);
2235 
  /* Convert all crossing fall_thru edges into non-crossing ones, by
     redirecting each to a new block that ends in an unconditional
     jump to the original fall-thru dest.  */
2239   fix_up_fall_thru_edges ();
2240 
2241   /* If the architecture does not have conditional branches that can
2242      span all of memory, convert crossing conditional branches into
2243      crossing unconditional branches.  */
2244   if (!HAS_LONG_COND_BRANCH)
2245     fix_crossing_conditional_branches ();
2246 
2247   /* If the architecture does not have unconditional branches that
2248      can span all of memory, convert crossing unconditional branches
2249      into indirect jumps.  Since adding an indirect jump also adds
2250      a new register usage, update the register usage information as
2251      well.  */
2252   if (!HAS_LONG_UNCOND_BRANCH)
2253     fix_crossing_unconditional_branches ();
2254 
2255   add_reg_crossing_jump_notes ();
2256 
2257   /* Clear bb->aux fields that the above routines were using.  */
2258   clear_aux_for_blocks ();
2259 
2260   VEC_free (edge, heap, crossing_edges);
2261 
2262   /* ??? FIXME: DF generates the bb info for a block immediately.
2263      And by immediately, I mean *during* creation of the block.
2264 
2265 	#0  df_bb_refs_collect
2266 	#1  in df_bb_refs_record
2267 	#2  in create_basic_block_structure
2268 
2269      Which means that the bb_has_eh_pred test in df_bb_refs_collect
2270      will *always* fail, because no edges can have been added to the
2271      block yet.  Which of course means we don't add the right
2272      artificial refs, which means we fail df_verify (much) later.
2273 
     The cleanest solution would seem to be to make DF_DEFER_INSN_RESCAN
     imply that we also shouldn't grab data from the new blocks those
     new insns are in either.  In this way one can create the block,
     link it up properly, and have everything Just Work later, when the
     deferred insns are processed.
2279 
2280      In the meantime, we have no other option but to throw away all
2281      of the DF data and recompute it all.  */
2282   if (cfun->eh->lp_array)
2283     {
2284       df_finish_pass (true);
2285       df_scan_alloc (NULL);
2286       df_scan_blocks ();
2287       /* Not all post-landing pads use all of the EH_RETURN_DATA_REGNO
2288 	 data.  We blindly generated all of them when creating the new
2289 	 landing pad.  Delete those assignments we don't use.  */
2290       df_set_flags (DF_LR_RUN_DCE);
2291       df_analyze ();
2292     }
2293 
2294   return TODO_verify_flow | TODO_verify_rtl_sharing;
2295 }
2296 
2297 static bool
2298 gate_handle_reorder_blocks (void)
2299 {
2300   if (targetm.cannot_modify_jumps_p ())
2301     return false;
  /* Don't reorder blocks when optimizing for size, because extra jump
     insns may be created; also, barriers may create extra padding.

     More correctly, we should have a block reordering mode that tries
     to minimize the combined size of all the jumps.  This would more or
     less automatically remove extra jumps, but would also try to use
     more short jumps instead of long jumps.  */
2309   if (!optimize_function_for_speed_p (cfun))
2310     return false;
2311   return (optimize > 0
2312 	  && (flag_reorder_blocks || flag_reorder_blocks_and_partition));
2313 }
2314 
2315 
2316 /* Reorder basic blocks.  */
2317 static unsigned int
2318 rest_of_handle_reorder_blocks (void)
2319 {
2320   basic_block bb;
2321 
2322   /* Last attempt to optimize CFG, as scheduling, peepholing and insn
2323      splitting possibly introduced more crossjumping opportunities.  */
2324   cfg_layout_initialize (CLEANUP_EXPENSIVE);
2325 
2326   reorder_basic_blocks ();
2327   cleanup_cfg (CLEANUP_EXPENSIVE);
2328 
2329   FOR_EACH_BB (bb)
2330     if (bb->next_bb != EXIT_BLOCK_PTR)
2331       bb->aux = bb->next_bb;
2332   cfg_layout_finalize ();
2333 
2334   /* Add NOTE_INSN_SWITCH_TEXT_SECTIONS notes.  */
2335   insert_section_boundary_note ();
2336   return 0;
2337 }
2338 
2339 struct rtl_opt_pass pass_reorder_blocks =
2340 {
2341  {
2342   RTL_PASS,
2343   "bbro",                               /* name */
2344   gate_handle_reorder_blocks,           /* gate */
2345   rest_of_handle_reorder_blocks,        /* execute */
2346   NULL,                                 /* sub */
2347   NULL,                                 /* next */
2348   0,                                    /* static_pass_number */
2349   TV_REORDER_BLOCKS,                    /* tv_id */
2350   0,                                    /* properties_required */
2351   0,                                    /* properties_provided */
2352   0,                                    /* properties_destroyed */
2353   0,                                    /* todo_flags_start */
2354   TODO_verify_rtl_sharing,              /* todo_flags_finish */
2355  }
2356 };
2357 
2358 static bool
2359 gate_handle_partition_blocks (void)
2360 {
  /* The optimization to partition hot/cold basic blocks into separate
     sections of the .o file does not work well with linkonce or with
     user-defined section attributes.  Don't call it if either case
     arises.  */
2365   return (flag_reorder_blocks_and_partition
2366           && optimize
2367 	  /* See gate_handle_reorder_blocks.  We should not partition if
2368 	     we are going to omit the reordering.  */
2369 	  && optimize_function_for_speed_p (cfun)
2370 	  && !DECL_ONE_ONLY (current_function_decl)
2371 	  && !user_defined_section_attribute);
2372 }
2373 
2374 struct rtl_opt_pass pass_partition_blocks =
2375 {
2376  {
2377   RTL_PASS,
2378   "bbpart",                             /* name */
2379   gate_handle_partition_blocks,         /* gate */
2380   partition_hot_cold_basic_blocks,      /* execute */
2381   NULL,                                 /* sub */
2382   NULL,                                 /* next */
2383   0,                                    /* static_pass_number */
2384   TV_REORDER_BLOCKS,                    /* tv_id */
2385   PROP_cfglayout,                       /* properties_required */
2386   0,                                    /* properties_provided */
2387   0,                                    /* properties_destroyed */
2388   0,                                    /* todo_flags_start */
  0                                     /* todo_flags_finish */
2390  }
2391 };
2392