/* Instruction scheduling pass.
   Copyright (C) 1992-2019 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This pass implements list scheduling within basic blocks.  It is
   run twice: (1) after flow analysis, but before register allocation,
   and (2) after register allocation.

   The first run performs interblock scheduling, moving insns between
   different blocks in the same "region", and the second runs only
   basic block scheduling.

   Interblock motions performed are useful motions and speculative
   motions, including speculative loads.  Motions requiring code
   duplication are not supported.  The identification of motion type
   and the check for validity of speculative motions requires
   construction and analysis of the function's control flow graph.

   The main entry point for this pass is schedule_insns(), called for
   each function.  The work of the scheduler is organized in three
   levels: (1) function level: insns are subject to splitting,
   control-flow-graph is constructed, regions are computed (after
   reload, each region is of one block), (2) region level: control
   flow graph attributes required for interblock scheduling are
   computed (dominators, reachability, etc.), data dependences and
   priorities are computed, and (3) block level: insns in the block
   are actually scheduled.  */
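
/* A rough sketch of the call structure (illustrative, not the literal
   control flow):

     schedule_insns ()                function level: CFG construction,
       find_rgns ()                   region computation
       for each region:
         schedule_region (rgn)        region level: dominators, edge
                                      probabilities, dependences, priorities
         for each bb in the region:
           schedule_block (...)       block level: the actual list
                                      scheduling (see haifa-sched.c)  */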

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "emit-rtl.h"
#include "recog.h"
#include "profile.h"
#include "insn-attr.h"
#include "except.h"
#include "params.h"
#include "cfganal.h"
#include "sched-int.h"
#include "sel-sched.h"
#include "tree-pass.h"
#include "dbgcnt.h"
#include "pretty-print.h"
#include "print-rtl.h"

#ifdef INSN_SCHEDULING

/* Some accessor macros for h_i_d members only used within this file.  */
#define FED_BY_SPEC_LOAD(INSN) (HID (INSN)->fed_by_spec_load)
#define IS_LOAD_INSN(INSN) (HID (INSN)->is_load_insn)

/* nr_inter/spec counts interblock/speculative motion for the function.  */
static int nr_inter, nr_spec;

static int is_cfg_nonregular (void);

/* Number of regions in the procedure.  */
int nr_regions = 0;

/* Same as above before adding any new regions.  */
static int nr_regions_initial = 0;

/* Table of region descriptions.  */
region *rgn_table = NULL;

/* Array of lists of regions' blocks.  */
int *rgn_bb_table = NULL;

/* Topological order of blocks in the region (if b2 is reachable from
   b1, block_to_bb[b2] > block_to_bb[b1]).  Note: A basic block is
   always referred to by either block or b, while its topological
   order name (in the region) is referred to by bb.  */
int *block_to_bb = NULL;
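
/* For example (purely illustrative): a region whose blocks are the basic
   blocks 4, 7 and 9, in topological order, has block_to_bb[4] == 0,
   block_to_bb[7] == 1 and block_to_bb[9] == 2, and rgn_bb_table lists
   4, 7, 9 consecutively starting at the region's RGN_BLOCKS index.  */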

/* The number of the region containing a block.  */
int *containing_rgn = NULL;

/* ebb_head [i] - is index in rgn_bb_table of the head basic block of i'th ebb.
   Currently we can get an ebb only through splitting of the currently
   scheduled block, therefore, we don't need an ebb_head array for every
   region; it's sufficient to hold it for the current one only.  */
int *ebb_head = NULL;

/* The minimum probability of reaching a source block so that it will be
   considered for speculative scheduling.  */
static int min_spec_prob;

static void find_single_block_region (bool);
static void find_rgns (void);
static bool too_large (int, int *, int *);

/* Blocks of the current region being scheduled.  */
int current_nr_blocks;
int current_blocks;

/* A speculative motion requires checking live information on the path
   from 'source' to 'target'.  The split blocks are those to be checked.
   After a speculative motion, live information should be modified in
   the 'update' blocks.

   Lists of split and update blocks for each candidate of the current
   target are in array bblst_table.  */
static basic_block *bblst_table;
static int bblst_size, bblst_last;

/* Arrays that hold the DFA state at the end of a basic block, to re-use
   as the initial state at the start of successor blocks.  The
   BB_STATE_ARRAY array holds the actual DFA states, and BB_STATE[I] is
   a pointer into BB_STATE_ARRAY for basic block I.  FIXME: This should
   be a vec.  */
static char *bb_state_array = NULL;
static state_t *bb_state = NULL;

/* Target info declarations.

   The block currently being scheduled is referred to as the "target" block,
   while other blocks in the region from which insns can be moved to the
   target are called "source" blocks.  The candidate structure holds info
   about such sources: are they valid?  Speculative?  Etc.  */
struct bblst
{
  basic_block *first_member;
  int nr_members;
};

struct candidate
{
  char is_valid;
  char is_speculative;
  int src_prob;
  bblst split_bbs;
  bblst update_bbs;
};

static candidate *candidate_table;
#define IS_VALID(src) (candidate_table[src].is_valid)
#define IS_SPECULATIVE(src) (candidate_table[src].is_speculative)
#define IS_SPECULATIVE_INSN(INSN)			\
  (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
#define SRC_PROB(src) ( candidate_table[src].src_prob )

/* The bb being currently scheduled.  */
int target_bb;

/* List of edges.  */
struct edgelst
{
  edge *first_member;
  int nr_members;
};

static edge *edgelst_table;
static int edgelst_last;

static void extract_edgelst (sbitmap, edgelst *);

/* Target info functions.  */
static void split_edges (int, int, edgelst *);
static void compute_trg_info (int);
void debug_candidate (int);
void debug_candidates (int);

/* Dominators array: dom[i] contains the sbitmap of dominators of
   bb i in the region.  */
static sbitmap *dom;

/* bb 0 is the only region entry.  */
#define IS_RGN_ENTRY(bb) (!bb)

/* Is bb_src dominated by bb_trg?  */
#define IS_DOMINATED(bb_src, bb_trg)                                 \
( bitmap_bit_p (dom[bb_src], bb_trg) )

/* Probability: Prob[i] is an int in [0, REG_BR_PROB_BASE] which is
   the probability of bb i relative to the region entry.  */
static int *prob;

/* Bit-set of edges, where bit i stands for edge i.  */
typedef sbitmap edgeset;

/* Number of edges in the region.  */
static int rgn_nr_edges;

/* Array of size rgn_nr_edges.  */
static edge *rgn_edges;

/* Mapping from each edge in the graph to its number in the rgn.  */
#define EDGE_TO_BIT(edge) ((int)(size_t)(edge)->aux)
#define SET_EDGE_TO_BIT(edge,nr) ((edge)->aux = (void *)(size_t)(nr))
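
/* The mapping is set up once per region, roughly along these lines
   (a sketch, not the exact code in schedule_region):

     rgn_nr_edges = 0;
     FOR_EACH_BB_FN (block, cfun)
       if (CONTAINING_RGN (block->index) == rgn)
         FOR_EACH_EDGE (e, ei, block->succs)
           {
             SET_EDGE_TO_BIT (e, rgn_nr_edges);
             rgn_edges[rgn_nr_edges++] = e;
           }

   so that EDGE_TO_BIT (rgn_edges[i]) == i and an edgeset (see below)
   can represent any subset of the region's edges.  */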

/* The split edges of a source bb are different for each target
   bb.  In order to compute this efficiently, the 'potential-split edges'
   are computed for each bb prior to scheduling a region.  This is actually
   the split edges of each bb relative to the region entry.

   pot_split[bb] is the set of potential split edges of bb.  */
static edgeset *pot_split;

/* For every bb, a set of its ancestor edges.  */
static edgeset *ancestor_edges;

#define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))

/* Speculative scheduling functions.  */
static int check_live_1 (int, rtx);
static void update_live_1 (int, rtx);
static int is_pfree (rtx, int, int);
static int find_conditional_protection (rtx_insn *, int);
static int is_conditionally_protected (rtx, int, int);
static int is_prisky (rtx, int, int);
static int is_exception_free (rtx_insn *, int, int);

static bool sets_likely_spilled (rtx);
static void sets_likely_spilled_1 (rtx, const_rtx, void *);
static void add_branch_dependences (rtx_insn *, rtx_insn *);
static void compute_block_dependences (int);

static void schedule_region (int);
static void concat_insn_mem_list (rtx_insn_list *, rtx_expr_list *,
				  rtx_insn_list **, rtx_expr_list **);
static void propagate_deps (int, struct deps_desc *);
static void free_pending_lists (void);

/* Functions for construction of the control flow graph.  */

/* Return 1 if control flow graph should not be constructed, 0 otherwise.

   We decide not to build the control flow graph if there is possibly more
   than one entry to the function, if computed branches exist, if we
   have nonlocal gotos, or if we have an unreachable loop.  */
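
/* As an example of a computed branch, GNU C's computed goto

     void *targets[] = { &&lab1, &&lab2 };
     goto *targets[i];

   expands to a computed jump insn; when one is present we give up on
   interblock scheduling and fall back to single-block regions.  */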

static int
is_cfg_nonregular (void)
{
  basic_block b;
  rtx_insn *insn;

  /* If we have a label that could be the target of a nonlocal goto, then
     the cfg is not well structured.  */
  if (nonlocal_goto_handler_labels)
    return 1;

  /* If we have any forced labels, then the cfg is not well structured.  */
  if (forced_labels)
    return 1;

  /* If we have exception handlers, then we consider the cfg not well
     structured.  ?!?  We should be able to handle this now that we
     compute an accurate cfg for EH.  */
  if (current_function_has_exception_handlers ())
    return 1;

  /* If we have insns which refer to labels as non-jumped-to operands,
     then we consider the cfg not well structured.  */
  FOR_EACH_BB_FN (b, cfun)
    FOR_BB_INSNS (b, insn)
      {
	rtx note, set, dest;
	rtx_insn *next;

	/* If this function has a computed jump, then we consider the cfg
	   not well structured.  */
	if (JUMP_P (insn) && computed_jump_p (insn))
	  return 1;

	if (!INSN_P (insn))
	  continue;

	note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX);
	if (note == NULL_RTX)
	  continue;

	/* For that label not to be seen as a referred-to label, this
	   must be a single-set which is feeding a jump *only*.  This
	   could be a conditional jump with the label split off for
	   machine-specific reasons or a casesi/tablejump.  */
	next = next_nonnote_insn (insn);
	if (next == NULL_RTX
	    || !JUMP_P (next)
	    || (JUMP_LABEL (next) != XEXP (note, 0)
		&& find_reg_note (next, REG_LABEL_TARGET,
				  XEXP (note, 0)) == NULL_RTX)
	    || BLOCK_FOR_INSN (insn) != BLOCK_FOR_INSN (next))
	  return 1;

	set = single_set (insn);
	if (set == NULL_RTX)
	  return 1;

	dest = SET_DEST (set);
	if (!REG_P (dest) || !dead_or_set_p (next, dest))
	  return 1;
      }

  /* Unreachable loops with more than one basic block are detected
     during the DFS traversal in find_rgns.

     Unreachable loops with a single block are detected here.  This
     test is redundant with the one in find_rgns, but it's much
     cheaper to go ahead and catch the trivial case here.  */
  FOR_EACH_BB_FN (b, cfun)
    {
      if (EDGE_COUNT (b->preds) == 0
	  || (single_pred_p (b)
	      && single_pred (b) == b))
	return 1;
    }

  /* All the tests passed.  Consider the cfg well structured.  */
  return 0;
}

/* Extract list of edges from a bitmap containing EDGE_TO_BIT bits.  */

static void
extract_edgelst (sbitmap set, edgelst *el)
{
  unsigned int i = 0;
  sbitmap_iterator sbi;

  /* edgelst table space is reused in each call to extract_edgelst.  */
  edgelst_last = 0;

  el->first_member = &edgelst_table[edgelst_last];
  el->nr_members = 0;

  /* Iterate over each set bit in the bitmap.  */
  EXECUTE_IF_SET_IN_BITMAP (set, 0, i, sbi)
    {
      edgelst_table[edgelst_last++] = rgn_edges[i];
      el->nr_members++;
    }
}

/* Functions for the construction of regions.  */

/* Print the regions, for debugging purposes.  Callable from debugger.  */

DEBUG_FUNCTION void
debug_regions (void)
{
  int rgn, bb;

  fprintf (sched_dump, "\n;;   ------------ REGIONS ----------\n\n");
  for (rgn = 0; rgn < nr_regions; rgn++)
    {
      fprintf (sched_dump, ";;\trgn %d nr_blocks %d:\n", rgn,
	       rgn_table[rgn].rgn_nr_blocks);
      fprintf (sched_dump, ";;\tbb/block: ");

      /* We don't have ebb_head initialized yet, so we can't use
	 BB_TO_BLOCK ().  */
      current_blocks = RGN_BLOCKS (rgn);

      for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
	fprintf (sched_dump, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);

      fprintf (sched_dump, "\n\n");
    }
}

/* Print the region's basic blocks.  */

DEBUG_FUNCTION void
debug_region (int rgn)
{
  int bb;

  fprintf (stderr, "\n;;   ------------ REGION %d ----------\n\n", rgn);
  fprintf (stderr, ";;\trgn %d nr_blocks %d:\n", rgn,
	   rgn_table[rgn].rgn_nr_blocks);
  fprintf (stderr, ";;\tbb/block: ");

  /* We don't have ebb_head initialized yet, so we can't use
     BB_TO_BLOCK ().  */
  current_blocks = RGN_BLOCKS (rgn);

  for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
    fprintf (stderr, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);

  fprintf (stderr, "\n\n");

  for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
    {
      dump_bb (stderr,
	       BASIC_BLOCK_FOR_FN (cfun, rgn_bb_table[current_blocks + bb]),
	       0, TDF_SLIM | TDF_BLOCKS);
      fprintf (stderr, "\n");
    }

  fprintf (stderr, "\n");
}

/* True when the bb with index BB_INDEX is contained in region RGN.  */
static bool
bb_in_region_p (int bb_index, int rgn)
{
  int i;

  for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++)
    if (rgn_bb_table[current_blocks + i] == bb_index)
      return true;

  return false;
}

/* Dump region RGN to file F using dot syntax.  */
void
dump_region_dot (FILE *f, int rgn)
{
  int i;

  fprintf (f, "digraph Region_%d {\n", rgn);

  /* We don't have ebb_head initialized yet, so we can't use
     BB_TO_BLOCK ().  */
  current_blocks = RGN_BLOCKS (rgn);

  for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++)
    {
      edge e;
      edge_iterator ei;
      int src_bb_num = rgn_bb_table[current_blocks + i];
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, src_bb_num);

      FOR_EACH_EDGE (e, ei, bb->succs)
        if (bb_in_region_p (e->dest->index, rgn))
	  fprintf (f, "\t%d -> %d\n", src_bb_num, e->dest->index);
    }
  fprintf (f, "}\n");
}

/* The same, but first open a file specified by FNAME.  */
void
dump_region_dot_file (const char *fname, int rgn)
{
  FILE *f = fopen (fname, "wt");
  dump_region_dot (f, rgn);
  fclose (f);
}

/* Build a single block region for each basic block in the function.
   This allows for using the same code for interblock and basic block
   scheduling.  */

static void
find_single_block_region (bool ebbs_p)
{
  basic_block bb, ebb_start;
  int i = 0;

  nr_regions = 0;

  if (ebbs_p) {
    int probability_cutoff;
    if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
      probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
    else
      probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
    probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

    FOR_EACH_BB_FN (ebb_start, cfun)
      {
        RGN_NR_BLOCKS (nr_regions) = 0;
        RGN_BLOCKS (nr_regions) = i;
        RGN_DONT_CALC_DEPS (nr_regions) = 0;
        RGN_HAS_REAL_EBB (nr_regions) = 0;

        for (bb = ebb_start; ; bb = bb->next_bb)
          {
            edge e;

            rgn_bb_table[i] = bb->index;
            RGN_NR_BLOCKS (nr_regions)++;
            CONTAINING_RGN (bb->index) = nr_regions;
            BLOCK_TO_BB (bb->index) = i - RGN_BLOCKS (nr_regions);
            i++;

	    if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
                || LABEL_P (BB_HEAD (bb->next_bb)))
              break;

	    e = find_fallthru_edge (bb->succs);
            if (! e)
              break;
            if (e->probability.initialized_p ()
		&& e->probability.to_reg_br_prob_base () <= probability_cutoff)
              break;
          }

        ebb_start = bb;
        nr_regions++;
      }
  }
  else
    FOR_EACH_BB_FN (bb, cfun)
      {
        rgn_bb_table[nr_regions] = bb->index;
        RGN_NR_BLOCKS (nr_regions) = 1;
        RGN_BLOCKS (nr_regions) = nr_regions;
        RGN_DONT_CALC_DEPS (nr_regions) = 0;
        RGN_HAS_REAL_EBB (nr_regions) = 0;

        CONTAINING_RGN (bb->index) = nr_regions;
        BLOCK_TO_BB (bb->index) = 0;
        nr_regions++;
      }
}

/* Estimate number of the insns in the BB.  */
static int
rgn_estimate_number_of_insns (basic_block bb)
{
  int count;

  count = INSN_LUID (BB_END (bb)) - INSN_LUID (BB_HEAD (bb));

  if (MAY_HAVE_DEBUG_INSNS)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
	if (DEBUG_INSN_P (insn))
	  count--;
    }

  return count;
}

/* Update number of blocks and the estimate for number of insns
   in the region.  Return true if the region is "too large" for interblock
   scheduling (compile time considerations).  */

static bool
too_large (int block, int *num_bbs, int *num_insns)
{
  (*num_bbs)++;
  (*num_insns) += (common_sched_info->estimate_number_of_insns
                   (BASIC_BLOCK_FOR_FN (cfun, block)));

  return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
	  || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
}
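
/* Both limits are --param knobs (max-sched-region-blocks and
   max-sched-region-insns; see params.def for their defaults), so a
   region that grows past either bound is cancelled and its blocks are
   re-formed into smaller or single-block regions.  */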

/* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
   is still an inner loop.  Put in max_hdr[blk] the header of the innermost
   loop containing blk.  */
#define UPDATE_LOOP_RELATIONS(blk, hdr)		\
{						\
  if (max_hdr[blk] == -1)			\
    max_hdr[blk] = hdr;				\
  else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr])	\
    bitmap_clear_bit (inner, hdr);		\
  else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr])	\
    {						\
      bitmap_clear_bit (inner, max_hdr[blk]);	\
      max_hdr[blk] = hdr;			\
    }						\
}
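
/* Worked example (illustrative): let BLK sit inside two nested loops
   with headers H1 (outer) and H2 (inner), so dfs_nr[H1] < dfs_nr[H2].
   Processing the back edge to H1 first sets max_hdr[BLK] = H1; when
   the back edge to H2 is processed later, dfs_nr[H1] < dfs_nr[H2]
   takes the last branch above: H1 is cleared from INNER and
   max_hdr[BLK] becomes H2, the innermost enclosing header.  */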

/* Find regions for interblock scheduling.

   A region for scheduling can be:

     * A loop-free procedure, or

     * A reducible inner loop, or

     * A basic block not contained in any other region.

   ?!? In theory we could build other regions based on extended basic
   blocks or reverse extended basic blocks.  Is it worth the trouble?

   Loop blocks that form a region are put into the region's block list
   in topological order.

   This procedure stores its results into the following global (ick)
   variables:

     * nr_regions
     * rgn_table
     * rgn_bb_table
     * block_to_bb
     * containing_rgn

   We use dominator relationships to avoid making regions out of
   non-reducible loops.

   This procedure needs to be converted to work on pred/succ lists instead
   of edge tables.  That would simplify it somewhat.  */
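
/* As a small illustration, in a CFG like

	0
	|
	1 <-+
	|   |	(2 -> 1 is a back edge)
	2 --+
	|
	3

   the reducible loop {1, 2} headed by block 1 forms one region, with
   its blocks listed in topological order, while blocks 0 and 3 end up
   in single-block regions (or become heads of regions grown later by
   extend_rgns).  */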

static void
haifa_find_rgns (void)
{
  int *max_hdr, *dfs_nr, *degree;
  char no_loops = 1;
  int node, child, loop_head, i, head, tail;
  int count = 0, sp, idx = 0;
  edge_iterator current_edge;
  edge_iterator *stack;
  int num_bbs, num_insns, unreachable;
  int too_large_failure;
  basic_block bb;

  /* Perform a DFS traversal of the cfg.  Identify loop headers, inner loops
     and a mapping from block to its loop header (if the block is contained
     in a loop, else -1).

     Store results in HEADER, INNER, and MAX_HDR respectively, these will
     be used as inputs to the second traversal.

     STACK, SP and DFS_NR are only used during the first traversal.  */

  /* Allocate and initialize variables for the first traversal.  */
  max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
  dfs_nr = XCNEWVEC (int, last_basic_block_for_fn (cfun));
  stack = XNEWVEC (edge_iterator, n_edges_for_fn (cfun));

  /* Note if a block is a natural inner loop header.  */
  auto_sbitmap inner (last_basic_block_for_fn (cfun));
  bitmap_ones (inner);

  /* Note if a block is a natural loop header.  */
  auto_sbitmap header (last_basic_block_for_fn (cfun));
  bitmap_clear (header);

  /* Note if a block is in the block queue.  */
  auto_sbitmap in_queue (last_basic_block_for_fn (cfun));
  bitmap_clear (in_queue);

  /* Note if a block is on the DFS stack.  */
  auto_sbitmap in_stack (last_basic_block_for_fn (cfun));
  bitmap_clear (in_stack);

  for (i = 0; i < last_basic_block_for_fn (cfun); i++)
    max_hdr[i] = -1;

  #define EDGE_PASSED(E) (ei_end_p ((E)) || ei_edge ((E))->aux)
  #define SET_EDGE_PASSED(E) (ei_edge ((E))->aux = ei_edge ((E)))

  /* DFS traversal to find inner loops in the cfg.  */

  current_edge = ei_start (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->succs);
  sp = -1;

  while (1)
    {
      if (EDGE_PASSED (current_edge))
	{
	  /* We have reached a leaf node or a node that was already
	     processed.  Pop edges off the stack until we find
	     an edge that has not yet been processed.  */
	  while (sp >= 0 && EDGE_PASSED (current_edge))
	    {
	      /* Pop entry off the stack.  */
	      current_edge = stack[sp--];
	      node = ei_edge (current_edge)->src->index;
	      gcc_assert (node != ENTRY_BLOCK);
	      child = ei_edge (current_edge)->dest->index;
	      gcc_assert (child != EXIT_BLOCK);
	      bitmap_clear_bit (in_stack, child);
	      if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child]))
		UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
	      ei_next (&current_edge);
	    }

	  /* See if we have finished the DFS tree traversal.  */
	  if (sp < 0 && EDGE_PASSED (current_edge))
	    break;

	  /* Nope, continue the traversal with the popped node.  */
	  continue;
	}

      /* Process a node.  */
      node = ei_edge (current_edge)->src->index;
      gcc_assert (node != ENTRY_BLOCK);
      bitmap_set_bit (in_stack, node);
      dfs_nr[node] = ++count;

      /* We don't traverse to the exit block.  */
      child = ei_edge (current_edge)->dest->index;
      if (child == EXIT_BLOCK)
	{
	  SET_EDGE_PASSED (current_edge);
	  ei_next (&current_edge);
	  continue;
	}

      /* If the successor is in the stack, then we've found a loop.
	 Mark the loop, if it is not a natural loop, then it will
	 be rejected during the second traversal.  */
      if (bitmap_bit_p (in_stack, child))
	{
	  no_loops = 0;
	  bitmap_set_bit (header, child);
	  UPDATE_LOOP_RELATIONS (node, child);
	  SET_EDGE_PASSED (current_edge);
	  ei_next (&current_edge);
	  continue;
	}

      /* If the child was already visited, then there is no need to visit
	 it again.  Just update the loop relationships and restart
	 with a new edge.  */
      if (dfs_nr[child])
	{
	  if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child]))
	    UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
	  SET_EDGE_PASSED (current_edge);
	  ei_next (&current_edge);
	  continue;
	}

      /* Push an entry on the stack and continue DFS traversal.  */
      stack[++sp] = current_edge;
      SET_EDGE_PASSED (current_edge);
      current_edge = ei_start (ei_edge (current_edge)->dest->succs);
    }

  /* Reset ->aux field used by EDGE_PASSED.  */
  FOR_ALL_BB_FN (bb, cfun)
    {
      edge_iterator ei;
      edge e;
      FOR_EACH_EDGE (e, ei, bb->succs)
	e->aux = NULL;
    }


  /* Another check for unreachable blocks.  The earlier test in
     is_cfg_nonregular only finds unreachable blocks that do not
     form a loop.

     The DFS traversal will mark every block that is reachable from
     the entry node by placing a nonzero value in dfs_nr.  Thus if
     dfs_nr is zero for any block, then it must be unreachable.  */
  unreachable = 0;
  FOR_EACH_BB_FN (bb, cfun)
    if (dfs_nr[bb->index] == 0)
      {
	unreachable = 1;
	break;
      }

  /* Gross.  To avoid wasting memory, the second pass uses the dfs_nr array
     to hold degree counts.  */
  degree = dfs_nr;

  FOR_EACH_BB_FN (bb, cfun)
    degree[bb->index] = EDGE_COUNT (bb->preds);

  /* Do not perform region scheduling if there are any unreachable
     blocks.  */
  if (!unreachable)
    {
      int *queue, *degree1 = NULL;
      /* We use EXTENDED_RGN_HEADER as an addition to HEADER, and put
	 there the basic blocks that are forced to be region heads.
	 This is done to try to assemble several smaller regions
	 from a too_large region.  */
      sbitmap extended_rgn_header = NULL;
      bool extend_regions_p;

      if (no_loops)
	bitmap_set_bit (header, 0);

      /* Second traversal: find reducible inner loops and topologically
	 sort the blocks of each region.  */

      queue = XNEWVEC (int, n_basic_blocks_for_fn (cfun));

      extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
      if (extend_regions_p)
        {
          degree1 = XNEWVEC (int, last_basic_block_for_fn (cfun));
          extended_rgn_header =
	    sbitmap_alloc (last_basic_block_for_fn (cfun));
          bitmap_clear (extended_rgn_header);
	}

      /* Find blocks which are inner loop headers.  We still have non-reducible
	 loops to consider at this point.  */
      FOR_EACH_BB_FN (bb, cfun)
	{
	  if (bitmap_bit_p (header, bb->index) && bitmap_bit_p (inner, bb->index))
	    {
	      edge e;
	      edge_iterator ei;
	      basic_block jbb;

	      /* Now check that the loop is reducible.  We do this separate
		 from finding inner loops so that we do not find a reducible
		 loop which contains an inner non-reducible loop.

		 A simple way to find reducible/natural loops is to verify
		 that each block in the loop is dominated by the loop
		 header.

		 If there exists a block that is not dominated by the loop
		 header, then the block is reachable from outside the loop
		 and thus the loop is not a natural loop.  */
	      FOR_EACH_BB_FN (jbb, cfun)
		{
		  /* First identify blocks in the loop, except for the loop
		     entry block.  */
		  if (bb->index == max_hdr[jbb->index] && bb != jbb)
		    {
		      /* Now verify that the block is dominated by the loop
			 header.  */
		      if (!dominated_by_p (CDI_DOMINATORS, jbb, bb))
			break;
		    }
		}

	      /* If we exited the loop early, then BB is the header of
		 a non-reducible loop and we should quit processing it
		 now.  */
	      if (jbb != EXIT_BLOCK_PTR_FOR_FN (cfun))
		continue;

	      /* BB is a header of an inner loop, or block 0 in a subroutine
		 with no loops at all.  */
	      head = tail = -1;
	      too_large_failure = 0;
	      loop_head = max_hdr[bb->index];

              if (extend_regions_p)
                /* We save the degree in case we meet a too_large region
		   and cancel it.  We need a correct degree later when
                   calling extend_rgns.  */
                memcpy (degree1, degree,
			last_basic_block_for_fn (cfun) * sizeof (int));

	      /* Decrease the degree of all BB's successors for topological
		 ordering.  */
	      FOR_EACH_EDGE (e, ei, bb->succs)
		if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
		  --degree[e->dest->index];

	      /* Estimate # insns, and count # blocks in the region.  */
	      num_bbs = 1;
	      num_insns = common_sched_info->estimate_number_of_insns (bb);

	      /* Find all loop latches (blocks with back edges to the loop
		 header), or all the leaf blocks if the cfg has no loops.

		 Place those blocks into the queue.  */
	      if (no_loops)
		{
		  FOR_EACH_BB_FN (jbb, cfun)
		    /* Leaf nodes have only a single successor which must
		       be EXIT_BLOCK.  */
		    if (single_succ_p (jbb)
			&& single_succ (jbb) == EXIT_BLOCK_PTR_FOR_FN (cfun))
		      {
			queue[++tail] = jbb->index;
			bitmap_set_bit (in_queue, jbb->index);

			if (too_large (jbb->index, &num_bbs, &num_insns))
			  {
			    too_large_failure = 1;
			    break;
			  }
		      }
		}
	      else
		{
		  edge e;

		  FOR_EACH_EDGE (e, ei, bb->preds)
		    {
		      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
			continue;

		      node = e->src->index;

		      if (max_hdr[node] == loop_head && node != bb->index)
			{
			  /* This is a loop latch.  */
			  queue[++tail] = node;
			  bitmap_set_bit (in_queue, node);

			  if (too_large (node, &num_bbs, &num_insns))
			    {
			      too_large_failure = 1;
			      break;
			    }
			}
		    }
		}

	      /* Now add all the blocks in the loop to the queue.

		 We know the loop is a natural loop; however the algorithm
		 above will not always mark certain blocks as being in the
		 loop.  Consider:
		    node   children
		     a	    b,c
		     b	    c
		     c	    a,d
		     d	    b

		 The algorithm in the DFS traversal may not mark B & D as part
		 of the loop (i.e. they will not have max_hdr set to A).

		 We know they cannot be loop latches (else they would have
		 had max_hdr set since they'd have a backedge to a dominator
		 block).  So we don't need them on the initial queue.

		 We know they are part of the loop because they are dominated
		 by the loop header and can be reached by a backwards walk of
		 the edges starting with nodes on the initial queue.

		 It is safe and desirable to include those nodes in the
		 loop/scheduling region.  To do so we would need to decrease
		 the degree of a node if it is the target of a backedge
		 within the loop itself as the node is placed in the queue.

		 We do not do this because I'm not sure that the actual
		 scheduling code will properly handle this case. ?!? */

	      while (head < tail && !too_large_failure)
		{
		  edge e;
		  child = queue[++head];

		  FOR_EACH_EDGE (e, ei,
				 BASIC_BLOCK_FOR_FN (cfun, child)->preds)
		    {
		      node = e->src->index;

		      /* See discussion above about nodes not marked as in
			 this loop during the initial DFS traversal.  */
		      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
			  || max_hdr[node] != loop_head)
			{
			  tail = -1;
			  break;
			}
		      else if (!bitmap_bit_p (in_queue, node) && node != bb->index)
			{
			  queue[++tail] = node;
			  bitmap_set_bit (in_queue, node);

			  if (too_large (node, &num_bbs, &num_insns))
			    {
			      too_large_failure = 1;
			      break;
			    }
			}
		    }
		}

	      if (tail >= 0 && !too_large_failure)
		{
		  /* Place the loop header into list of region blocks.  */
		  degree[bb->index] = -1;
		  rgn_bb_table[idx] = bb->index;
		  RGN_NR_BLOCKS (nr_regions) = num_bbs;
		  RGN_BLOCKS (nr_regions) = idx++;
		  RGN_DONT_CALC_DEPS (nr_regions) = 0;
		  RGN_HAS_REAL_EBB (nr_regions) = 0;
		  CONTAINING_RGN (bb->index) = nr_regions;
		  BLOCK_TO_BB (bb->index) = count = 0;

		  /* Remove blocks from queue[] when their in degree
		     becomes zero.  Repeat until no blocks are left on the
		     list.  This produces a topological list of blocks in
		     the region.  */
		  while (tail >= 0)
		    {
		      if (head < 0)
			head = tail;
		      child = queue[head];
		      if (degree[child] == 0)
			{
			  edge e;

			  degree[child] = -1;
			  rgn_bb_table[idx++] = child;
			  BLOCK_TO_BB (child) = ++count;
			  CONTAINING_RGN (child) = nr_regions;
			  queue[head] = queue[tail--];

			  FOR_EACH_EDGE (e, ei,
					 BASIC_BLOCK_FOR_FN (cfun,
							     child)->succs)
			    if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
			      --degree[e->dest->index];
			}
		      else
			--head;
		    }
		  ++nr_regions;
		}
	      else if (extend_regions_p)
		{
		  /* Restore DEGREE.  */
		  int *t = degree;

		  degree = degree1;
		  degree1 = t;

		  /* And force successors of BB to be region heads.
		     This may provide several smaller regions instead
		     of one too_large region.  */
		  FOR_EACH_EDGE (e, ei, bb->succs)
		    if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
		      bitmap_set_bit (extended_rgn_header, e->dest->index);
		}
	    }
	}
      free (queue);

      if (extend_regions_p)
        {
          free (degree1);

          bitmap_ior (header, header, extended_rgn_header);
          sbitmap_free (extended_rgn_header);

          extend_rgns (degree, &idx, header, max_hdr);
        }
    }

  /* Any block that did not end up in a region is placed into a region
     by itself.  */
  FOR_EACH_BB_FN (bb, cfun)
    if (degree[bb->index] >= 0)
      {
	rgn_bb_table[idx] = bb->index;
	RGN_NR_BLOCKS (nr_regions) = 1;
	RGN_BLOCKS (nr_regions) = idx++;
	RGN_DONT_CALC_DEPS (nr_regions) = 0;
	RGN_HAS_REAL_EBB (nr_regions) = 0;
	CONTAINING_RGN (bb->index) = nr_regions++;
	BLOCK_TO_BB (bb->index) = 0;
      }

  free (max_hdr);
  free (degree);
  free (stack);
}


/* Wrapper function.
   If FLAG_SEL_SCHED_PIPELINING is set, then use a custom function to form
   regions.  Otherwise just call haifa_find_rgns.  */
static void
find_rgns (void)
{
  if (sel_sched_p () && flag_sel_sched_pipelining)
    sel_find_rgns ();
  else
    haifa_find_rgns ();
}

static int gather_region_statistics (int **);
static void print_region_statistics (int *, int, int *, int);

/* Calculate the histogram that shows the number of regions having the
   given number of basic blocks, and store it in the RSP array.  Return
   the size of this array.  */
static int
gather_region_statistics (int **rsp)
{
  int i, *a = 0, a_sz = 0;

  /* a[i] is the number of regions that have (i + 1) basic blocks.  */
  for (i = 0; i < nr_regions; i++)
    {
      int nr_blocks = RGN_NR_BLOCKS (i);

      gcc_assert (nr_blocks >= 1);

      if (nr_blocks > a_sz)
	{
	  a = XRESIZEVEC (int, a, nr_blocks);
	  do
	    a[a_sz++] = 0;
	  while (a_sz != nr_blocks);
	}

      a[nr_blocks - 1]++;
    }

  *rsp = a;
  return a_sz;
}

/* Print regions statistics.  S1 and S2 denote the data before and after
   calling extend_rgns, respectively.  */
static void
print_region_statistics (int *s1, int s1_sz, int *s2, int s2_sz)
{
  int i;

  /* We iterate until s2_sz because extend_rgns does not decrease
     the maximal region size.  */
  for (i = 1; i < s2_sz; i++)
    {
      int n1, n2;

      n2 = s2[i];

      if (n2 == 0)
	continue;

      if (i >= s1_sz)
	n1 = 0;
      else
	n1 = s1[i];

      fprintf (sched_dump, ";; Region extension statistics: size %d: " \
	       "was %d + %d more\n", i + 1, n1, n2 - n1);
    }
}

/* Extend regions.
   DEGREE - Array of incoming edge counts, considering only
   the edges that don't have their sources in already formed regions.
   IDXP - pointer to the next available index in rgn_bb_table.
   HEADER - set of all region heads.
   LOOP_HDR - mapping from block to the containing loop
   (two blocks can reside within one region if they have
   the same loop header).  */
void
extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
{
  int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
  int nblocks = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);

  max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));

  order = XNEWVEC (int, last_basic_block_for_fn (cfun));
  post_order_compute (order, false, false);

  for (i = nblocks - 1; i >= 0; i--)
    {
      int bbn = order[i];
      if (degree[bbn] >= 0)
	{
	  max_hdr[bbn] = bbn;
	  rescan = 1;
	}
      else
        /* This block already was processed in find_rgns.  */
        max_hdr[bbn] = -1;
    }

  /* The idea is to topologically walk through the CFG in top-down order.
     During the traversal, if all the predecessors of a node are
     marked to be in the same region (they all have the same max_hdr),
     then the current node is also marked to be a part of that region.
     Otherwise the node starts its own region.
     The CFG should be traversed until no further changes are made.  On each
     iteration the set of the region heads is extended (the set of those
     blocks that have max_hdr[bbi] == bbi).  This set is upper bounded by the
     set of all basic blocks, thus the algorithm is guaranteed to
     terminate.  */
1187 
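  /* Illustrative example: for a loop-free diamond

	  1
	 / \
	2   3
	 \ /
	  4

     in which no block was placed into a region by find_rgns, the first
     iteration makes 1 a region head (its only predecessor is the entry
     block) and lets 2, 3 and then 4 inherit max_hdr == 1; the next
     iteration changes nothing, so the fixed point is reached and the
     whole diamond is assembled into one region headed by 1.  */
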
  while (rescan && iter < max_iter)
    {
      rescan = 0;

      for (i = nblocks - 1; i >= 0; i--)
	{
	  edge e;
	  edge_iterator ei;
	  int bbn = order[i];

	  if (max_hdr[bbn] != -1 && !bitmap_bit_p (header, bbn))
	    {
	      int hdr = -1;

	      FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, bbn)->preds)
		{
		  int predn = e->src->index;

		  if (predn != ENTRY_BLOCK
		      /* If pred wasn't processed in find_rgns.  */
		      && max_hdr[predn] != -1
		      /* And pred and bb reside in the same loop.
			 (Or out of any loop).  */
		      && loop_hdr[bbn] == loop_hdr[predn])
		    {
		      if (hdr == -1)
			/* Then bb extends the containing region of pred.  */
			hdr = max_hdr[predn];
		      else if (hdr != max_hdr[predn])
			/* Too bad, there are at least two predecessors
			   that reside in different regions.  Thus, BB should
			   begin its own region.  */
			{
			  hdr = bbn;
			  break;
			}
		    }
		  else
		    /* BB starts its own region.  */
		    {
		      hdr = bbn;
		      break;
		    }
		}

	      if (hdr == bbn)
		{
		  /* If BB starts its own region,
		     update the set of headers with BB.  */
		  bitmap_set_bit (header, bbn);
		  rescan = 1;
		}
	      else
		gcc_assert (hdr != -1);

	      max_hdr[bbn] = hdr;
	    }
	}

      iter++;
    }

  /* Statistics were gathered on the SPEC2000 package of tests with
     mainline weekly snapshot gcc-4.1-20051015 on ia64.

     Statistics for SPECint:
     1 iteration : 1751 cases (38.7%)
     2 iterations: 2770 cases (61.3%)
     Blocks wrapped in regions by find_rgns without extension: 18295 blocks
     Blocks wrapped in regions by 2 iterations in extend_rgns: 23821 blocks
     (We don't count single block regions here).

     Statistics for SPECfp:
     1 iteration : 621 cases (35.9%)
     2 iterations: 1110 cases (64.1%)
     Blocks wrapped in regions by find_rgns without extension: 6476 blocks
     Blocks wrapped in regions by 2 iterations in extend_rgns: 11155 blocks
     (We don't count single block regions here).

     By default we do at most 2 iterations.
     This can be overridden with the max-sched-extend-regions-iters parameter:
     0 - disable region extension,
     N > 0 - do at most N iterations.  */

  if (sched_verbose && iter != 0)
    fprintf (sched_dump, ";; Region extension iterations: %d%s\n", iter,
	     rescan ? "... failed" : "");

  if (!rescan && iter != 0)
    {
      int *s1 = NULL, s1_sz = 0;

      /* Save the old statistics for later printout.  */
      if (sched_verbose >= 6)
	s1_sz = gather_region_statistics (&s1);

      /* We have succeeded.  Now assemble the regions.  */
      for (i = nblocks - 1; i >= 0; i--)
	{
	  int bbn = order[i];

	  if (max_hdr[bbn] == bbn)
	    /* BBN is a region head.  */
	    {
	      edge e;
	      edge_iterator ei;
	      int num_bbs = 0, j, num_insns = 0, large;

	      large = too_large (bbn, &num_bbs, &num_insns);

	      degree[bbn] = -1;
	      rgn_bb_table[idx] = bbn;
	      RGN_BLOCKS (nr_regions) = idx++;
	      RGN_DONT_CALC_DEPS (nr_regions) = 0;
	      RGN_HAS_REAL_EBB (nr_regions) = 0;
	      CONTAINING_RGN (bbn) = nr_regions;
	      BLOCK_TO_BB (bbn) = 0;

	      FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, bbn)->succs)
		if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
		  degree[e->dest->index]--;

	      if (!large)
		/* Here we check whether the region is too_large.  */
		for (j = i - 1; j >= 0; j--)
		  {
		    int succn = order[j];
		    if (max_hdr[succn] == bbn)
		      {
			if ((large = too_large (succn, &num_bbs, &num_insns)))
			  break;
		      }
		  }

	      if (large)
		/* If the region is too_large, then wrap every block of
		   the region into a single block region.
		   Here we wrap the region head only.  Other blocks are
		   processed in the loop below.  */
		{
		  RGN_NR_BLOCKS (nr_regions) = 1;
		  nr_regions++;
		}

	      num_bbs = 1;

	      for (j = i - 1; j >= 0; j--)
		{
		  int succn = order[j];

		  if (max_hdr[succn] == bbn)
		    /* This loop iterates over all basic blocks that
		       are supposed to be in the region with head BBN,
		       and wraps them into that region (or into a single
		       block region).  */
		    {
		      gcc_assert (degree[succn] == 0);

		      degree[succn] = -1;
		      rgn_bb_table[idx] = succn;
		      BLOCK_TO_BB (succn) = large ? 0 : num_bbs++;
		      CONTAINING_RGN (succn) = nr_regions;

		      if (large)
			/* Wrap SUCCN into a single block region.  */
			{
			  RGN_BLOCKS (nr_regions) = idx;
			  RGN_NR_BLOCKS (nr_regions) = 1;
			  RGN_DONT_CALC_DEPS (nr_regions) = 0;
			  RGN_HAS_REAL_EBB (nr_regions) = 0;
			  nr_regions++;
			}

		      idx++;

		      FOR_EACH_EDGE (e, ei,
				     BASIC_BLOCK_FOR_FN (cfun, succn)->succs)
			if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
			  degree[e->dest->index]--;
		    }
		}

	      if (!large)
		{
		  RGN_NR_BLOCKS (nr_regions) = num_bbs;
		  nr_regions++;
		}
	    }
	}

      if (sched_verbose >= 6)
	{
	  int *s2, s2_sz;

	  /* Get the new statistics and print the comparison with the
	     one before calling this function.  */
	  s2_sz = gather_region_statistics (&s2);
	  print_region_statistics (s1, s1_sz, s2, s2_sz);
	  free (s1);
	  free (s2);
	}
    }

  free (order);
  free (max_hdr);

  *idxp = idx;
}

/* Functions for regions scheduling information.  */

/* Compute dominators, probability, and potential-split-edges of bb.
   Assume that these values were already computed for bb's predecessors.  */

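/* A worked example with illustrative numbers: REG_BR_PROB_BASE is
   10000, so if prob[pred_bb] == 8000 and the incoming edge is taken
   half of the time (5000 out of 10000), combine_probabilities yields
   8000 * 5000 / 10000 == 4000 as that edge's contribution to prob[bb];
   the contributions of all incoming edges are summed below.  */
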
static void
compute_dom_prob_ps (int bb)
{
  edge_iterator in_ei;
  edge in_edge;

  /* We shouldn't have any real ebbs yet.  */
  gcc_assert (ebb_head [bb] == bb + current_blocks);

  if (IS_RGN_ENTRY (bb))
    {
      bitmap_set_bit (dom[bb], 0);
      prob[bb] = REG_BR_PROB_BASE;
      return;
    }

  prob[bb] = 0;

  /* Initialize dom[bb] to '111..1'.  */
  bitmap_ones (dom[bb]);

  FOR_EACH_EDGE (in_edge, in_ei,
		 BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb))->preds)
    {
      int pred_bb;
      edge out_edge;
      edge_iterator out_ei;

      if (in_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	continue;

      pred_bb = BLOCK_TO_BB (in_edge->src->index);
      bitmap_and (dom[bb], dom[bb], dom[pred_bb]);
      bitmap_ior (ancestor_edges[bb],
		  ancestor_edges[bb], ancestor_edges[pred_bb]);

      bitmap_set_bit (ancestor_edges[bb], EDGE_TO_BIT (in_edge));

      bitmap_ior (pot_split[bb], pot_split[bb], pot_split[pred_bb]);

      FOR_EACH_EDGE (out_edge, out_ei, in_edge->src->succs)
	bitmap_set_bit (pot_split[bb], EDGE_TO_BIT (out_edge));

      prob[bb] += combine_probabilities
		 (prob[pred_bb],
		  in_edge->probability.initialized_p ()
		  ? in_edge->probability.to_reg_br_prob_base ()
		  : 0);
      /* The rounding divide in combine_probabilities can result in an
	 extra probability increment propagating along 50-50 edges.
	 Eventually when the edges re-merge, the accumulated probability
	 can go slightly above REG_BR_PROB_BASE.  */
      if (prob[bb] > REG_BR_PROB_BASE)
        prob[bb] = REG_BR_PROB_BASE;
    }

  bitmap_set_bit (dom[bb], bb);
  bitmap_and_compl (pot_split[bb], pot_split[bb], ancestor_edges[bb]);

  if (sched_verbose >= 2)
    fprintf (sched_dump, ";;  bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb),
	     (100 * prob[bb]) / REG_BR_PROB_BASE);
}

/* Functions for target info.  */

/* Compute in BL the list of split-edges of bb_src relative to bb_trg.
   Note that bb_trg dominates bb_src.  */

static void
split_edges (int bb_src, int bb_trg, edgelst *bl)
{
  auto_sbitmap src (SBITMAP_SIZE (pot_split[bb_src]));
  bitmap_copy (src, pot_split[bb_src]);

  bitmap_and_compl (src, src, pot_split[bb_trg]);
  extract_edgelst (src, bl);
}

/* Find the valid candidate-source-blocks for the target block TRG, compute
   their probability, and check if they are speculative or not.
   For speculative sources, compute their update-blocks and split-blocks.  */
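
/* Sketch (illustrative): with target T and a speculative source S
   dominated by T,

	  T
	 / \
	S   U

   the edge T->U branches off the path from T to S, so it is a split
   edge of S relative to T and U is a split block: an insn moved
   speculatively from S up to T must not set a register that is live at
   the start of U (see check_live_1).  Destinations of the remaining
   out edges of the split edges' FROM blocks become update blocks,
   whose live-in info is refreshed after the motion (see
   update_live_1).  */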

static void
compute_trg_info (int trg)
{
  candidate *sp;
  edgelst el = { NULL, 0 };
  int i, j, k, update_idx;
  basic_block block;
  edge_iterator ei;
  edge e;

  candidate_table = XNEWVEC (candidate, current_nr_blocks);

  bblst_last = 0;
  /* bblst_table holds split blocks and update blocks for each block after
     the current one in the region.  split blocks and update blocks are
     the TO blocks of region edges, so there can be at most rgn_nr_edges
     of them.  */
  bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges;
  bblst_table = XNEWVEC (basic_block, bblst_size);

  edgelst_last = 0;
  edgelst_table = XNEWVEC (edge, rgn_nr_edges);

  /* Define some of the fields for the target bb as well.  */
  sp = candidate_table + trg;
  sp->is_valid = 1;
  sp->is_speculative = 0;
  sp->src_prob = REG_BR_PROB_BASE;

  auto_sbitmap visited (last_basic_block_for_fn (cfun));

  for (i = trg + 1; i < current_nr_blocks; i++)
    {
      sp = candidate_table + i;

      sp->is_valid = IS_DOMINATED (i, trg);
      if (sp->is_valid)
	{
	  int tf = prob[trg], cf = prob[i];

	  /* In CFGs with low probability edges TF can possibly be zero.  */
	  sp->src_prob = (tf ? GCOV_COMPUTE_SCALE (cf, tf) : 0);
	  sp->is_valid = (sp->src_prob >= min_spec_prob);
	}

      if (sp->is_valid)
	{
	  split_edges (i, trg, &el);
	  sp->is_speculative = (el.nr_members) ? 1 : 0;
	  if (sp->is_speculative && !flag_schedule_speculative)
	    sp->is_valid = 0;
	}

      if (sp->is_valid)
	{
	  /* Compute split blocks and store them in bblst_table.
	     The TO block of every split edge is a split block.  */
	  sp->split_bbs.first_member = &bblst_table[bblst_last];
	  sp->split_bbs.nr_members = el.nr_members;
	  for (j = 0; j < el.nr_members; bblst_last++, j++)
	    bblst_table[bblst_last] = el.first_member[j]->dest;
	  sp->update_bbs.first_member = &bblst_table[bblst_last];

	  /* Compute update blocks and store them in bblst_table.
	     For every split edge, look at the FROM block, and check
	     all out edges.  For each out edge that is not a split edge,
	     add the TO block to the update block list.  This list can end
	     up with a lot of duplicates.  We need to weed them out to avoid
	     overrunning the end of the bblst_table.  */

	  update_idx = 0;
	  bitmap_clear (visited);
	  for (j = 0; j < el.nr_members; j++)
	    {
	      block = el.first_member[j]->src;
	      FOR_EACH_EDGE (e, ei, block->succs)
		{
		  if (!bitmap_bit_p (visited, e->dest->index))
		    {
		      for (k = 0; k < el.nr_members; k++)
			if (e == el.first_member[k])
			  break;

		      if (k >= el.nr_members)
			{
			  bblst_table[bblst_last++] = e->dest;
			  bitmap_set_bit (visited, e->dest->index);
			  update_idx++;
			}
		    }
		}
	    }
	  sp->update_bbs.nr_members = update_idx;

	  /* Make sure we didn't overrun the end of bblst_table.  */
	  gcc_assert (bblst_last <= bblst_size);
	}
      else
	{
	  sp->split_bbs.nr_members = sp->update_bbs.nr_members = 0;

	  sp->is_speculative = 0;
	  sp->src_prob = 0;
	}
    }
}

/* Free the computed target info.  */
static void
free_trg_info (void)
{
  free (candidate_table);
  free (bblst_table);
  free (edgelst_table);
}

/* Print candidates info, for debugging purposes.  Callable from debugger.  */

DEBUG_FUNCTION void
debug_candidate (int i)
{
  if (!candidate_table[i].is_valid)
    return;

  if (candidate_table[i].is_speculative)
    {
      int j;
      fprintf (sched_dump, "src b %d bb %d speculative \n", BB_TO_BLOCK (i), i);

      fprintf (sched_dump, "split path: ");
      for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++)
	{
	  int b = candidate_table[i].split_bbs.first_member[j]->index;

	  fprintf (sched_dump, " %d ", b);
	}
      fprintf (sched_dump, "\n");

      fprintf (sched_dump, "update path: ");
      for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++)
	{
	  int b = candidate_table[i].update_bbs.first_member[j]->index;

	  fprintf (sched_dump, " %d ", b);
	}
      fprintf (sched_dump, "\n");
    }
  else
    {
      fprintf (sched_dump, " src %d equivalent\n", BB_TO_BLOCK (i));
    }
}

/* Print candidates info, for debugging purposes.  Callable from debugger.  */

DEBUG_FUNCTION void
debug_candidates (int trg)
{
  int i;

  fprintf (sched_dump, "----------- candidate table: target: b=%d bb=%d ---\n",
	   BB_TO_BLOCK (trg), trg);
  for (i = trg + 1; i < current_nr_blocks; i++)
    debug_candidate (i);
}
1650 
1651 /* Functions for speculative scheduling.  */
1652 
1653 static bitmap_head not_in_df;
1654 
1655 /* Return 0 if x is a set of a register alive in the beginning of one
1656    of the split-blocks of src, otherwise return 1.  */
1657 
1658 static int
check_live_1(int src,rtx x)1659 check_live_1 (int src, rtx x)
1660 {
1661   int i;
1662   int regno;
1663   rtx reg = SET_DEST (x);
1664 
1665   if (reg == 0)
1666     return 1;
1667 
1668   while (GET_CODE (reg) == SUBREG
1669 	 || GET_CODE (reg) == ZERO_EXTRACT
1670 	 || GET_CODE (reg) == STRICT_LOW_PART)
1671     reg = XEXP (reg, 0);
1672 
1673   if (GET_CODE (reg) == PARALLEL)
1674     {
1675       int i;
1676 
1677       for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
1678 	if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
1679 	  if (check_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0)))
1680 	    return 1;
1681 
1682       return 0;
1683     }
1684 
1685   if (!REG_P (reg))
1686     return 1;
1687 
1688   regno = REGNO (reg);
1689 
1690   if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno])
1691     {
1692       /* Global registers are assumed live.  */
1693       return 0;
1694     }
1695   else
1696     {
1697       if (regno < FIRST_PSEUDO_REGISTER)
1698 	{
1699 	  /* Check for hard registers.  */
1700 	  int j = REG_NREGS (reg);
1701 	  while (--j >= 0)
1702 	    {
1703 	      for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
1704 		{
1705 		  basic_block b = candidate_table[src].split_bbs.first_member[i];
1706 		  int t = bitmap_bit_p (&not_in_df, b->index);
1707 
1708 		  /* We can have split blocks that were recently generated.
1709 		     Such blocks are always outside the current region.  */
1710 		  gcc_assert (!t || (CONTAINING_RGN (b->index)
1711 				     != CONTAINING_RGN (BB_TO_BLOCK (src))));
1712 
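		  /* If B has no df info yet (see not_in_df), conservatively
		     treat the register as live at its head.  */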
1713 		  if (t || REGNO_REG_SET_P (df_get_live_in (b), regno + j))
1714 		    return 0;
1715 		}
1716 	    }
1717 	}
1718       else
1719 	{
1720 	  /* Check for pseudo registers.  */
1721 	  for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
1722 	    {
1723 	      basic_block b = candidate_table[src].split_bbs.first_member[i];
1724 	      int t = bitmap_bit_p (&not_in_df, b->index);
1725 
1726 	      gcc_assert (!t || (CONTAINING_RGN (b->index)
1727 				 != CONTAINING_RGN (BB_TO_BLOCK (src))));
1728 
1729 	      if (t || REGNO_REG_SET_P (df_get_live_in (b), regno))
1730 		return 0;
1731 	    }
1732 	}
1733     }
1734 
1735   return 1;
1736 }
1737 
1738 /* If x is a set of a register R, mark that R is alive at the beginning
1739    of every update-block of src.  */
1740 
1741 static void
1742 update_live_1 (int src, rtx x)
1743 {
1744   int i;
1745   int regno;
1746   rtx reg = SET_DEST (x);
1747 
1748   if (reg == 0)
1749     return;
1750 
1751   while (GET_CODE (reg) == SUBREG
1752 	 || GET_CODE (reg) == ZERO_EXTRACT
1753 	 || GET_CODE (reg) == STRICT_LOW_PART)
1754     reg = XEXP (reg, 0);
1755 
1756   if (GET_CODE (reg) == PARALLEL)
1757     {
1758       int i;
1759 
1760       for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
1761 	if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
1762 	  update_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0));
1763 
1764       return;
1765     }
1766 
1767   if (!REG_P (reg))
1768     return;
1769 
1770   /* Global registers are always live, so the code below does not apply
1771      to them.  */
1772 
1773   regno = REGNO (reg);
1774 
1775   if (! HARD_REGISTER_NUM_P (regno)
1776       || !global_regs[regno])
1777     {
1778       for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
1779 	{
1780 	  basic_block b = candidate_table[src].update_bbs.first_member[i];
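	  /* Mark the whole group of REG_NREGS registers live at the
	     head of B.  */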
1781 	  bitmap_set_range (df_get_live_in (b), regno, REG_NREGS (reg));
1782 	}
1783     }
1784 }
1785 
1786 /* Return 1 if insn can be speculatively moved from block src to trg,
1787    otherwise return 0.  Called before the first insertion of insn into
1788    the ready list or before scheduling.  */
1789 
1790 static int
1791 check_live (rtx_insn *insn, int src)
1792 {
1793   /* Find the registers set by instruction.  */
1794   if (GET_CODE (PATTERN (insn)) == SET
1795       || GET_CODE (PATTERN (insn)) == CLOBBER)
1796     return check_live_1 (src, PATTERN (insn));
1797   else if (GET_CODE (PATTERN (insn)) == PARALLEL)
1798     {
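      /* For a PARALLEL, every SET or CLOBBER part must pass the
	 liveness check.  */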
1799       int j;
1800       for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
1801 	if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
1802 	     || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
1803 	    && !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j)))
1804 	  return 0;
1805 
1806       return 1;
1807     }
1808 
1809   return 1;
1810 }
1811 
1812 /* Update the live registers info after insn was moved speculatively from
1813    block src to trg.  */
1814 
1815 static void
1816 update_live (rtx_insn *insn, int src)
1817 {
1818   /* Find the registers set by instruction.  */
1819   if (GET_CODE (PATTERN (insn)) == SET
1820       || GET_CODE (PATTERN (insn)) == CLOBBER)
1821     update_live_1 (src, PATTERN (insn));
1822   else if (GET_CODE (PATTERN (insn)) == PARALLEL)
1823     {
1824       int j;
1825       for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
1826 	if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
1827 	    || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
1828 	  update_live_1 (src, XVECEXP (PATTERN (insn), 0, j));
1829     }
1830 }
1831 
1832 /* Nonzero if block bb_to is equal to, or reachable from block bb_from.  */
1833 #define IS_REACHABLE(bb_from, bb_to)					\
1834   (bb_from == bb_to							\
1835    || IS_RGN_ENTRY (bb_from)						\
1836    || (bitmap_bit_p (ancestor_edges[bb_to],					\
1837 	 EDGE_TO_BIT (single_pred_edge (BASIC_BLOCK_FOR_FN (cfun, \
1838 							    BB_TO_BLOCK (bb_from)))))))
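
/* Apart from the trivial cases, the test relies on the region's
   ancestor-edge bitmaps: bb_to is reachable from bb_from when the edge
   entering bb_from is recorded as an ancestor edge of bb_to.  */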
1839 
1840 /* Turns on the fed_by_spec_load flag for insns fed by load_insn.  */
1841 
1842 static void
1843 set_spec_fed (rtx load_insn)
1844 {
1845   sd_iterator_def sd_it;
1846   dep_t dep;
1847 
1848   FOR_EACH_DEP (load_insn, SD_LIST_FORW, sd_it, dep)
1849     if (DEP_TYPE (dep) == REG_DEP_TRUE)
1850       FED_BY_SPEC_LOAD (DEP_CON (dep)) = 1;
1851 }
1852 
1853 /* On the path from the insn to load_insn_bb, find a conditional
1854    branch depending on insn that guards the speculative load.  */
1855 
1856 static int
1857 find_conditional_protection (rtx_insn *insn, int load_insn_bb)
1858 {
1859   sd_iterator_def sd_it;
1860   dep_t dep;
1861 
1862   /* Iterate through DEF-USE forward dependences.  */
1863   FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
1864     {
1865       rtx_insn *next = DEP_CON (dep);
1866 
1867       if ((CONTAINING_RGN (BLOCK_NUM (next)) ==
1868 	   CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb)))
1869 	  && IS_REACHABLE (INSN_BB (next), load_insn_bb)
1870 	  && load_insn_bb != INSN_BB (next)
1871 	  && DEP_TYPE (dep) == REG_DEP_TRUE
1872 	  && (JUMP_P (next)
1873 	      || find_conditional_protection (next, load_insn_bb)))
1874 	return 1;
1875     }
1876   return 0;
1877 }				/* find_conditional_protection */
1878 
1879 /* Returns 1 if an insn insn1 that participates in the computation
1880    of load_insn's address also feeds a conditional branch that
1881    guards load_insn.  This is true if we find two DEF-USE
1882    chains:
1883    insn1 -> ... -> conditional-branch
1884    insn1 -> ... -> load_insn,
1885    and if a flow path exists:
1886    insn1 -> ... -> conditional-branch -> ... -> load_insn,
1887    and if insn1 is on the path
1888    region-entry -> ... -> bb_trg -> ... load_insn.
1889 
1890    Locate insn1 by climbing on INSN_BACK_DEPS from load_insn.
1891    Locate the branch by following INSN_FORW_DEPS from insn1.  */
1892 
1893 static int
1894 is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg)
1895 {
1896   sd_iterator_def sd_it;
1897   dep_t dep;
1898 
1899   FOR_EACH_DEP (load_insn, SD_LIST_BACK, sd_it, dep)
1900     {
1901       rtx_insn *insn1 = DEP_PRO (dep);
1902 
1903       /* Must be a DEF-USE dependence upon a non-branch insn.  */
1904       if (DEP_TYPE (dep) != REG_DEP_TRUE
1905 	  || JUMP_P (insn1))
1906 	continue;
1907 
1908       /* Must exist a path: region-entry -> ... -> bb_trg -> ... load_insn.  */
1909       if (INSN_BB (insn1) == bb_src
1910 	  || (CONTAINING_RGN (BLOCK_NUM (insn1))
1911 	      != CONTAINING_RGN (BB_TO_BLOCK (bb_src)))
1912 	  || (!IS_REACHABLE (bb_trg, INSN_BB (insn1))
1913 	      && !IS_REACHABLE (INSN_BB (insn1), bb_trg)))
1914 	continue;
1915 
1916       /* Now search for the conditional-branch.  */
1917       if (find_conditional_protection (insn1, bb_src))
1918 	return 1;
1919 
1920       /* Recursive step: search another insn1, "above" current insn1.  */
1921       return is_conditionally_protected (insn1, bb_src, bb_trg);
1922     }
1923 
1924   /* The chain does not exist.  */
1925   return 0;
1926 }				/* is_conditionally_protected */
1927 
1928 /* Returns 1 if a clue for "similar load" 'insn2' is found, and hence
1929    load_insn can move speculatively from bb_src to bb_trg.  All the
1930    following must hold:
1931 
1932    (1) both loads have 1 base register (PFREE_CANDIDATEs).
1933    (2) load_insn and insn2 have a def-use dependence upon
1934    the same insn 'insn1'.
1935    (3) either insn2 is in bb_trg, or:
1936    - there's only one split-block, and
1937    - insn2 is on the escape path.
1938 
1939    From all these we can conclude that the two loads access memory
1940    addresses that differ at most by a constant, and hence if moving
1941    load_insn would cause an exception, it would have been caused by
1942    insn2 anyhow.  */
1943 
1944 static int
1945 is_pfree (rtx load_insn, int bb_src, int bb_trg)
1946 {
1947   sd_iterator_def back_sd_it;
1948   dep_t back_dep;
1949   candidate *candp = candidate_table + bb_src;
1950 
1951   if (candp->split_bbs.nr_members != 1)
1952     /* Must have exactly one escape block.  */
1953     return 0;
1954 
1955   FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
1956     {
1957       rtx_insn *insn1 = DEP_PRO (back_dep);
1958 
1959       if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
1960 	/* Found a DEF-USE dependence (insn1, load_insn).  */
1961 	{
1962 	  sd_iterator_def fore_sd_it;
1963 	  dep_t fore_dep;
1964 
1965 	  FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
1966 	    {
1967 	      rtx_insn *insn2 = DEP_CON (fore_dep);
1968 
1969 	      if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
1970 		{
1971 		  /* Found a DEF-USE dependence (insn1, insn2).  */
1972 		  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
1973 		    /* insn2 not guaranteed to be a 1 base reg load.  */
1974 		    continue;
1975 
1976 		  if (INSN_BB (insn2) == bb_trg)
1977 		    /* insn2 is the similar load, in the target block.  */
1978 		    return 1;
1979 
1980 		  if (*(candp->split_bbs.first_member) == BLOCK_FOR_INSN (insn2))
1981 		    /* insn2 is a similar load, in a split-block.  */
1982 		    return 1;
1983 		}
1984 	    }
1985 	}
1986     }
1987 
1988   /* Couldn't find a similar load.  */
1989   return 0;
1990 }				/* is_pfree */
1991 
1992 /* Return 1 if load_insn is prisky: if load_insn is fed by a load
1993    moved speculatively, if its backward dependence may hide outside
1994    the region, or if it is conditionally protected.  */
1995 
1996 static int
1997 is_prisky (rtx load_insn, int bb_src, int bb_trg)
1998 {
1999   if (FED_BY_SPEC_LOAD (load_insn))
2000     return 1;
2001 
2002   if (sd_lists_empty_p (load_insn, SD_LIST_BACK))
2003     /* Dependence may 'hide' out of the region.  */
2004     return 1;
2005 
2006   if (is_conditionally_protected (load_insn, bb_src, bb_trg))
2007     return 1;
2008 
2009   return 0;
2010 }
2011 
2012 /* Insn is a candidate to be moved speculatively from bb_src to bb_trg.
2013    Return 1 if insn is exception-free (and the motion is valid)
2014    and 0 otherwise.  */
2015 
2016 static int
2017 is_exception_free (rtx_insn *insn, int bb_src, int bb_trg)
2018 {
2019   int insn_class = haifa_classify_insn (insn);
2020 
2021   /* Handle non-load insns.  */
2022   switch (insn_class)
2023     {
2024     case TRAP_FREE:
2025       return 1;
2026     case TRAP_RISKY:
2027       return 0;
2028     default:;
2029     }
2030 
2031   /* Handle loads.  */
2032   if (!flag_schedule_speculative_load)
2033     return 0;
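  /* Record that INSN is a load; begin_schedule_ready uses this flag to
     mark insns fed by a speculative load.  */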
2034   IS_LOAD_INSN (insn) = 1;
2035   switch (insn_class)
2036     {
2037     case IFREE:
2038       return 1;
2039     case IRISKY:
2040       return 0;
2041     case PFREE_CANDIDATE:
2042       if (is_pfree (insn, bb_src, bb_trg))
2043 	return 1;
2044       /* Don't 'break' here: PFREE-candidate is also PRISKY-candidate.  */
2045       /* FALLTHRU */
2046     case PRISKY_CANDIDATE:
2047       if (!flag_schedule_speculative_load_dangerous
2048 	  || is_prisky (insn, bb_src, bb_trg))
2049 	return 0;
2050       break;
2051     default:;
2052     }
2053 
2054   return flag_schedule_speculative_load_dangerous;
2055 }
2056 
2057 /* The number of insns from the current block scheduled so far.  */
2058 static int sched_target_n_insns;
2059 /* The number of insns from the current block to be scheduled in total.  */
2060 static int target_n_insns;
2061 /* The number of insns from the entire region scheduled so far.  */
2062 static int sched_n_insns;
2063 
2064 /* Implementations of the sched_info functions for region scheduling.  */
2065 static void init_ready_list (void);
2066 static int can_schedule_ready_p (rtx_insn *);
2067 static void begin_schedule_ready (rtx_insn *);
2068 static ds_t new_ready (rtx_insn *, ds_t);
2069 static int schedule_more_p (void);
2070 static const char *rgn_print_insn (const rtx_insn *, int);
2071 static int rgn_rank (rtx_insn *, rtx_insn *);
2072 static void compute_jump_reg_dependencies (rtx, regset);
2073 
2074 /* Functions for speculative scheduling.  */
2075 static void rgn_add_remove_insn (rtx_insn *, int);
2076 static void rgn_add_block (basic_block, basic_block);
2077 static void rgn_fix_recovery_cfg (int, int, int);
2078 static basic_block advance_target_bb (basic_block, rtx_insn *);
2079 
2080 /* Return nonzero if there are more insns that should be scheduled.  */
2081 
2082 static int
2083 schedule_more_p (void)
2084 {
2085   return sched_target_n_insns < target_n_insns;
2086 }
2087 
2088 /* Add all insns that are initially ready to the ready list READY.  Called
2089    once before scheduling a set of insns.  */
2090 
2091 static void
2092 init_ready_list (void)
2093 {
2094   rtx_insn *prev_head = current_sched_info->prev_head;
2095   rtx_insn *next_tail = current_sched_info->next_tail;
2096   int bb_src;
2097   rtx_insn *insn;
2098 
2099   target_n_insns = 0;
2100   sched_target_n_insns = 0;
2101   sched_n_insns = 0;
2102 
2103   /* Print debugging information.  */
2104   if (sched_verbose >= 5)
2105     debug_rgn_dependencies (target_bb);
2106 
2107   /* Prepare current target block info.  */
2108   if (current_nr_blocks > 1)
2109     compute_trg_info (target_bb);
2110 
2111   /* Initialize ready list with all 'ready' insns in target block.
2112      Count number of insns in the target block being scheduled.  */
2113   for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
2114     {
2115       gcc_assert (TODO_SPEC (insn) == HARD_DEP || TODO_SPEC (insn) == DEP_POSTPONED);
2116       TODO_SPEC (insn) = HARD_DEP;
2117       try_ready (insn);
2118       target_n_insns++;
2119 
2120       gcc_assert (!(TODO_SPEC (insn) & BEGIN_CONTROL));
2121     }
2122 
2123   /* Add to ready list all 'ready' insns in valid source blocks.
2124      For speculative insns, check-live, exception-free, and
2125      issue-delay.  */
2126   for (bb_src = target_bb + 1; bb_src < current_nr_blocks; bb_src++)
2127     if (IS_VALID (bb_src))
2128       {
2129 	rtx_insn *src_head;
2130 	rtx_insn *src_next_tail;
2131 	rtx_insn *tail, *head;
2132 
2133 	get_ebb_head_tail (EBB_FIRST_BB (bb_src), EBB_LAST_BB (bb_src),
2134 			   &head, &tail);
2135 	src_next_tail = NEXT_INSN (tail);
2136 	src_head = head;
2137 
2138 	for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn))
2139 	  if (INSN_P (insn))
2140 	    {
2141 	      gcc_assert (TODO_SPEC (insn) == HARD_DEP || TODO_SPEC (insn) == DEP_POSTPONED);
2142 	      TODO_SPEC (insn) = HARD_DEP;
2143 	      try_ready (insn);
2144 	    }
2145       }
2146 }
2147 
2148 /* Called after taking INSN from the ready list.  Returns nonzero if this
2149    insn can be scheduled, zero if we should silently discard it.  */
2150 
2151 static int
2152 can_schedule_ready_p (rtx_insn *insn)
2153 {
2154   /* An interblock motion?  */
2155   if (INSN_BB (insn) != target_bb && IS_SPECULATIVE_INSN (insn))
2156     {
2157       /* Cannot schedule this insn unless all operands are live.  */
2158       if (!check_live (insn, INSN_BB (insn)))
2159 	return 0;
2160 
2161       /* Should not move expensive instructions speculatively.  */
2162       if (GET_CODE (PATTERN (insn)) != CLOBBER
2163 	  && !targetm.sched.can_speculate_insn (insn))
2164 	return 0;
2165     }
2166 
2167   return 1;
2168 }
2169 
2170 /* Updates counters and other information.  Split from can_schedule_ready_p ()
2171    because when an insn is scheduled speculatively, the insn passed to
2172    can_schedule_ready_p () differs from the one passed to
2173    begin_schedule_ready ().  */
2174 static void
2175 begin_schedule_ready (rtx_insn *insn)
2176 {
2177   /* An interblock motion?  */
2178   if (INSN_BB (insn) != target_bb)
2179     {
2180       if (IS_SPECULATIVE_INSN (insn))
2181 	{
2182 	  gcc_assert (check_live (insn, INSN_BB (insn)));
2183 
2184 	  update_live (insn, INSN_BB (insn));
2185 
2186 	  /* For speculative load, mark insns fed by it.  */
2187 	  if (IS_LOAD_INSN (insn) || FED_BY_SPEC_LOAD (insn))
2188 	    set_spec_fed (insn);
2189 
2190 	  nr_spec++;
2191 	}
2192       nr_inter++;
2193     }
2194   else
2195     {
2196       /* In block motion.  */
2197       sched_target_n_insns++;
2198     }
2199   sched_n_insns++;
2200 }
2201 
2202 /* Called after INSN has all its hard dependencies resolved and the speculation
2203    of type TS is enough to overcome them all.
2204    Return nonzero if it should be moved to the ready list or the queue, or zero
2205    if we should silently discard it.  */
2206 static ds_t
2207 new_ready (rtx_insn *next, ds_t ts)
2208 {
2209   if (INSN_BB (next) != target_bb)
2210     {
2211       int not_ex_free = 0;
2212 
2213       /* For speculative insns, before inserting to ready/queue,
2214 	 check live, exception-free, and issue-delay.  */
2215       if (!IS_VALID (INSN_BB (next))
2216 	  || CANT_MOVE (next)
2217 	  || (IS_SPECULATIVE_INSN (next)
2218 	      && ((recog_memoized (next) >= 0
2219 		   && min_insn_conflict_delay (curr_state, next, next)
2220                    > PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY))
2221                   || IS_SPECULATION_CHECK_P (next)
2222 		  || !check_live (next, INSN_BB (next))
2223 		  || (not_ex_free = !is_exception_free (next, INSN_BB (next),
2224 							target_bb)))))
2225 	{
2226 	  if (not_ex_free
2227 	      /* We are here because is_exception_free () == false.
2228 		 But we possibly can handle that with control speculation.  */
2229 	      && sched_deps_info->generate_spec_deps
2230 	      && spec_info->mask & BEGIN_CONTROL)
2231 	    {
2232 	      ds_t new_ds;
2233 
2234 	      /* Add control speculation to NEXT's dependency type.  */
2235 	      new_ds = set_dep_weak (ts, BEGIN_CONTROL, MAX_DEP_WEAK);
2236 
2237 	      /* Check if NEXT can be speculated with new dependency type.  */
2238 	      if (sched_insn_is_legitimate_for_speculation_p (next, new_ds))
2239 		/* Here we got new control-speculative instruction.  */
2240 		ts = new_ds;
2241 	      else
2242 		/* NEXT isn't ready yet.  */
2243 		ts = DEP_POSTPONED;
2244 	    }
2245 	  else
2246 	    /* NEXT isn't ready yet.  */
2247             ts = DEP_POSTPONED;
2248 	}
2249     }
2250 
2251   return ts;
2252 }
2253 
2254 /* Return a string that contains the insn uid and optionally anything else
2255    necessary to identify this insn in an output.  It's valid to use a
2256    static buffer for this.  The ALIGNED parameter should cause the string
2257    to be formatted so that multiple output lines will line up nicely.  */
2258 
2259 static const char *
2260 rgn_print_insn (const rtx_insn *insn, int aligned)
2261 {
2262   static char tmp[80];
2263 
2264   if (aligned)
2265     sprintf (tmp, "b%3d: i%4d", INSN_BB (insn), INSN_UID (insn));
2266   else
2267     {
2268       if (current_nr_blocks > 1 && INSN_BB (insn) != target_bb)
2269 	sprintf (tmp, "%d/b%d", INSN_UID (insn), INSN_BB (insn));
2270       else
2271 	sprintf (tmp, "%d", INSN_UID (insn));
2272     }
2273   return tmp;
2274 }
2275 
2276 /* Compare priority of two insns.  Return a positive number if the second
2277    insn is to be preferred for scheduling, and a negative one if the first
2278    is to be preferred.  Zero if they are equally good.  */
2279 
2280 static int
2281 rgn_rank (rtx_insn *insn1, rtx_insn *insn2)
2282 {
2283   /* Some comparisons make sense only in interblock scheduling.  */
2284   if (INSN_BB (insn1) != INSN_BB (insn2))
2285     {
2286       int spec_val, prob_val;
2287 
2288       /* Prefer an inblock motion over an interblock motion.  */
2289       if ((INSN_BB (insn2) == target_bb) && (INSN_BB (insn1) != target_bb))
2290 	return 1;
2291       if ((INSN_BB (insn1) == target_bb) && (INSN_BB (insn2) != target_bb))
2292 	return -1;
2293 
2294       /* Prefer a useful motion over a speculative one.  */
2295       spec_val = IS_SPECULATIVE_INSN (insn1) - IS_SPECULATIVE_INSN (insn2);
2296       if (spec_val)
2297 	return spec_val;
2298 
2299       /* Prefer a more probable (speculative) insn.  */
2300       prob_val = INSN_PROBABILITY (insn2) - INSN_PROBABILITY (insn1);
2301       if (prob_val)
2302 	return prob_val;
2303     }
2304   return 0;
2305 }
2306 
2307 /* NEXT is an instruction that depends on INSN (a backward dependence);
2308    return nonzero if we should include this dependence in priority
2309    calculations.  */
2310 
2311 int
2312 contributes_to_priority (rtx_insn *next, rtx_insn *insn)
2313 {
2314   /* NEXT and INSN reside in one ebb.  */
2315   return BLOCK_TO_BB (BLOCK_NUM (next)) == BLOCK_TO_BB (BLOCK_NUM (insn));
2316 }
2317 
2318 /* INSN is a JUMP_INSN.  Store the set of registers that must be
2319    considered as used by this jump in USED.  */
2320 
2321 static void
2322 compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
2323 			       regset used ATTRIBUTE_UNUSED)
2324 {
2325   /* Nothing to do here, since we postprocess jumps in
2326      add_branch_dependences.  */
2327 }
2328 
2329 /* This variable holds common_sched_info hooks and data relevant to
2330    the interblock scheduler.  */
2331 static struct common_sched_info_def rgn_common_sched_info;
2332 
2333 
2334 /* This holds data for the dependence analysis relevant to
2335    the interblock scheduler.  */
2336 static struct sched_deps_info_def rgn_sched_deps_info;
2337 
2338 /* This holds constant data used for initializing the above structure
2339    for the Haifa scheduler.  */
2340 static const struct sched_deps_info_def rgn_const_sched_deps_info =
2341   {
2342     compute_jump_reg_dependencies,
2343     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2344     0, 0, 0
2345   };
2346 
2347 /* Same as above, but for the selective scheduler.  */
2348 static const struct sched_deps_info_def rgn_const_sel_sched_deps_info =
2349   {
2350     compute_jump_reg_dependencies,
2351     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2352     0, 0, 0
2353   };
2354 
2355 /* Return true if scheduling INSN will trigger finish of scheduling
2356    current block.  */
2357 static bool
2358 rgn_insn_finishes_block_p (rtx_insn *insn)
2359 {
2360   if (INSN_BB (insn) == target_bb
2361       && sched_target_n_insns + 1 == target_n_insns)
2362     /* INSN is the last not-scheduled instruction in the current block.  */
2363     return true;
2364 
2365   return false;
2366 }
2367 
2368 /* Used in schedule_insns to initialize current_sched_info for scheduling
2369    regions (or single basic blocks).  */
2370 
2371 static const struct haifa_sched_info rgn_const_sched_info =
2372 {
2373   init_ready_list,
2374   can_schedule_ready_p,
2375   schedule_more_p,
2376   new_ready,
2377   rgn_rank,
2378   rgn_print_insn,
2379   contributes_to_priority,
2380   rgn_insn_finishes_block_p,
2381 
2382   NULL, NULL,
2383   NULL, NULL,
2384   0, 0,
2385 
2386   rgn_add_remove_insn,
2387   begin_schedule_ready,
2388   NULL,
2389   advance_target_bb,
2390   NULL, NULL,
2391   SCHED_RGN
2392 };
2393 
2394 /* This variable holds the data and hooks needed by the Haifa scheduler backend
2395    for the interblock scheduler frontend.  */
2396 static struct haifa_sched_info rgn_sched_info;
2397 
2398 /* Returns maximum priority that an insn was assigned to.  */
2399 
2400 int
2401 get_rgn_sched_max_insns_priority (void)
2402 {
2403   return rgn_sched_info.sched_max_insns_priority;
2404 }
2405 
2406 /* Determine if PAT sets a TARGET_CLASS_LIKELY_SPILLED_P register.  */
2407 
2408 static bool
2409 sets_likely_spilled (rtx pat)
2410 {
2411   bool ret = false;
2412   note_stores (pat, sets_likely_spilled_1, &ret);
2413   return ret;
2414 }
2415 
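/* Helper for sets_likely_spilled; called via note_stores.  */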
2416 static void
2417 sets_likely_spilled_1 (rtx x, const_rtx pat, void *data)
2418 {
2419   bool *ret = (bool *) data;
2420 
2421   if (GET_CODE (pat) == SET
2422       && REG_P (x)
2423       && HARD_REGISTER_P (x)
2424       && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (x))))
2425     *ret = true;
2426 }
2427 
2428 /* A bitmap to note insns that participate in any dependency.  Used in
2429    add_branch_dependences.  */
2430 static sbitmap insn_referenced;
2431 
2432 /* Add dependences so that branches are scheduled to run last in their
2433    block.  */
2434 static void
2435 add_branch_dependences (rtx_insn *head, rtx_insn *tail)
2436 {
2437   rtx_insn *insn, *last;
2438 
2439   /* For all branches, calls, uses, clobbers, cc0 setters, and instructions
2440      that can throw exceptions, force them to remain in order at the end of
2441      the block by adding dependencies and giving the last a high priority.
2442      There may be notes present, and prev_head may also be a note.
2443 
2444      Branches must obviously remain at the end.  Calls should remain at the
2445      end since moving them results in worse register allocation.  Uses remain
2446      at the end to ensure proper register allocation.
2447 
2448      cc0 setters remain at the end because they can't be moved away from
2449      their cc0 user.
2450 
2451      Predecessors of SCHED_GROUP_P instructions at the end remain at the end.
2452 
2453      COND_EXEC insns cannot be moved past a branch (see e.g. PR17808).
2454 
2455      Insns setting TARGET_CLASS_LIKELY_SPILLED_P registers (usually return
2456      values) are not moved before reload because we can wind up with register
2457      allocation failures.  */
2458 
2459   while (tail != head && DEBUG_INSN_P (tail))
2460     tail = PREV_INSN (tail);
2461 
2462   insn = tail;
2463   last = 0;
2464   while (CALL_P (insn)
2465 	 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
2466 	 || (NONJUMP_INSN_P (insn)
2467 	     && (GET_CODE (PATTERN (insn)) == USE
2468 		 || GET_CODE (PATTERN (insn)) == CLOBBER
2469 		 || can_throw_internal (insn)
2470 		 || (HAVE_cc0 && sets_cc0_p (PATTERN (insn)))
2471 		 || (!reload_completed
2472 		     && sets_likely_spilled (PATTERN (insn)))))
2473 	 || NOTE_P (insn)
2474 	 || (last != 0 && SCHED_GROUP_P (last)))
2475     {
2476       if (!NOTE_P (insn))
2477 	{
2478 	  if (last != 0
2479 	      && sd_find_dep_between (insn, last, false) == NULL)
2480 	    {
2481 	      if (! sched_insns_conditions_mutex_p (last, insn))
2482 		add_dependence (last, insn, REG_DEP_ANTI);
2483 	      bitmap_set_bit (insn_referenced, INSN_LUID (insn));
2484 	    }
2485 
2486 	  CANT_MOVE (insn) = 1;
2487 
2488 	  last = insn;
2489 	}
2490 
2491       /* Don't overrun the bounds of the basic block.  */
2492       if (insn == head)
2493 	break;
2494 
2495       do
2496 	insn = PREV_INSN (insn);
2497       while (insn != head && DEBUG_INSN_P (insn));
2498     }
2499 
2500   /* Selective scheduling handles control dependencies by itself, and
2501      CANT_MOVE flags ensure that other insns will be kept in place.  */
2502   if (sel_sched_p ())
2503     return;
2504 
2505   /* Make sure these insns are scheduled last in their block.  */
2506   insn = last;
2507   if (insn != 0)
2508     while (insn != head)
2509       {
2510 	insn = prev_nonnote_insn (insn);
2511 
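	/* Skip insns that already participate in a dependence recorded
	   above, as well as debug insns.  */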
2512 	if (bitmap_bit_p (insn_referenced, INSN_LUID (insn))
2513 	    || DEBUG_INSN_P (insn))
2514 	  continue;
2515 
2516 	if (! sched_insns_conditions_mutex_p (last, insn))
2517 	  add_dependence (last, insn, REG_DEP_ANTI);
2518       }
2519 
2520   if (!targetm.have_conditional_execution ())
2521     return;
2522 
2523   /* Finally, if the block ends in a jump, and we are doing intra-block
2524      scheduling, make sure that the branch depends on any COND_EXEC insns
2525      inside the block to avoid moving the COND_EXECs past the branch insn.
2526 
2527      We only have to do this after reload, because (1) before reload there
2528      are no COND_EXEC insns, and (2) the region scheduler is an intra-block
2529      scheduler after reload.
2530 
2531      FIXME: We could in some cases move COND_EXEC insns past the branch if
2532      this scheduler would be a little smarter.  Consider this code:
2533 
2534 		T = [addr]
2535 	C  ?	addr += 4
2536 	!C ?	X += 12
2537 	C  ?	T += 1
2538 	C  ?	jump foo
2539 
2540      On a target with a one cycle stall on a memory access the optimal
2541      sequence would be:
2542 
2543 		T = [addr]
2544 	C  ?	addr += 4
2545 	C  ?	T += 1
2546 	C  ?	jump foo
2547 	!C ?	X += 12
2548 
2549      We don't want to put the 'X += 12' before the branch because it just
2550      wastes a cycle of execution time when the branch is taken.
2551 
2552      Note that in the example "!C" will always be true.  That is another
2553      possible improvement for handling COND_EXECs in this scheduler: it
2554      could remove always-true predicates.  */
2555 
2556   if (!reload_completed || ! (JUMP_P (tail) || JUMP_TABLE_DATA_P (tail)))
2557     return;
2558 
2559   insn = tail;
2560   while (insn != head)
2561     {
2562       insn = PREV_INSN (insn);
2563 
2564       /* Note that we want to add this dependency even when
2565 	 sched_insns_conditions_mutex_p returns true.  The whole point
2566 	 is that we _want_ this dependency, even if these insns really
2567 	 are independent.  */
2568       if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == COND_EXEC)
2569 	add_dependence (tail, insn, REG_DEP_ANTI);
2570     }
2571 }
2572 
2573 /* Data structures for the computation of data dependences in a region.  We
2574    keep one `deps' structure for every basic block.  Before analyzing the
2575    data dependences for a bb, its variables are initialized as a function of
2576    the variables of its predecessors.  When the analysis for a bb completes,
2577    we save the contents to the corresponding bb_deps[bb] variable.  */
2578 
2579 static struct deps_desc *bb_deps;
2580 
2581 static void
2582 concat_insn_mem_list (rtx_insn_list *copy_insns,
2583 		      rtx_expr_list *copy_mems,
2584 		      rtx_insn_list **old_insns_p,
2585 		      rtx_expr_list **old_mems_p)
2586 {
2587   rtx_insn_list *new_insns = *old_insns_p;
2588   rtx_expr_list *new_mems = *old_mems_p;
2589 
2590   while (copy_insns)
2591     {
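      /* The insn list and the mem list run in parallel; copy both in
	 lock step.  */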
2592       new_insns = alloc_INSN_LIST (copy_insns->insn (), new_insns);
2593       new_mems = alloc_EXPR_LIST (VOIDmode, copy_mems->element (), new_mems);
2594       copy_insns = copy_insns->next ();
2595       copy_mems = copy_mems->next ();
2596     }
2597 
2598   *old_insns_p = new_insns;
2599   *old_mems_p = new_mems;
2600 }
2601 
2602 /* Join PRED_DEPS to the SUCC_DEPS.  */
2603 void
2604 deps_join (struct deps_desc *succ_deps, struct deps_desc *pred_deps)
2605 {
2606   unsigned reg;
2607   reg_set_iterator rsi;
2608 
2609   /* The reg_last lists are inherited by successor.  */
2610   EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg, rsi)
2611     {
2612       struct deps_reg *pred_rl = &pred_deps->reg_last[reg];
2613       struct deps_reg *succ_rl = &succ_deps->reg_last[reg];
2614 
2615       succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses);
2616       succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets);
2617       succ_rl->implicit_sets
2618 	= concat_INSN_LIST (pred_rl->implicit_sets, succ_rl->implicit_sets);
2619       succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers,
2620                                             succ_rl->clobbers);
2621       succ_rl->uses_length += pred_rl->uses_length;
2622       succ_rl->clobbers_length += pred_rl->clobbers_length;
2623     }
2624   IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use);
2625 
2626   /* Mem read/write lists are inherited by successor.  */
2627   concat_insn_mem_list (pred_deps->pending_read_insns,
2628                         pred_deps->pending_read_mems,
2629                         &succ_deps->pending_read_insns,
2630                         &succ_deps->pending_read_mems);
2631   concat_insn_mem_list (pred_deps->pending_write_insns,
2632                         pred_deps->pending_write_mems,
2633                         &succ_deps->pending_write_insns,
2634                         &succ_deps->pending_write_mems);
2635 
2636   succ_deps->pending_jump_insns
2637     = concat_INSN_LIST (pred_deps->pending_jump_insns,
2638                         succ_deps->pending_jump_insns);
2639   succ_deps->last_pending_memory_flush
2640     = concat_INSN_LIST (pred_deps->last_pending_memory_flush,
2641                         succ_deps->last_pending_memory_flush);
2642 
2643   succ_deps->pending_read_list_length += pred_deps->pending_read_list_length;
2644   succ_deps->pending_write_list_length += pred_deps->pending_write_list_length;
2645   succ_deps->pending_flush_length += pred_deps->pending_flush_length;
2646 
2647   /* last_function_call is inherited by successor.  */
2648   succ_deps->last_function_call
2649     = concat_INSN_LIST (pred_deps->last_function_call,
2650                         succ_deps->last_function_call);
2651 
2652   /* last_function_call_may_noreturn is inherited by successor.  */
2653   succ_deps->last_function_call_may_noreturn
2654     = concat_INSN_LIST (pred_deps->last_function_call_may_noreturn,
2655                         succ_deps->last_function_call_may_noreturn);
2656 
2657   /* sched_before_next_call is inherited by successor.  */
2658   succ_deps->sched_before_next_call
2659     = concat_INSN_LIST (pred_deps->sched_before_next_call,
2660                         succ_deps->sched_before_next_call);
2661 }
2662 
2663 /* After computing the dependencies for block BB, propagate the dependencies
2664    found in TMP_DEPS to the successors of the block.  */
2665 static void
2666 propagate_deps (int bb, struct deps_desc *pred_deps)
2667 {
2668   basic_block block = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb));
2669   edge_iterator ei;
2670   edge e;
2671 
2672   /* bb's structures are inherited by its successors.  */
2673   FOR_EACH_EDGE (e, ei, block->succs)
2674     {
2675       /* Only bbs "below" bb, in the same region, are interesting.  */
2676       if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
2677 	  || CONTAINING_RGN (block->index) != CONTAINING_RGN (e->dest->index)
2678 	  || BLOCK_TO_BB (e->dest->index) <= bb)
2679 	continue;
2680 
2681       deps_join (bb_deps + BLOCK_TO_BB (e->dest->index), pred_deps);
2682     }
2683 
2684   /* These lists should point to the right place, for correct
2685      freeing later.  */
2686   bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns;
2687   bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems;
2688   bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns;
2689   bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems;
2690   bb_deps[bb].pending_jump_insns = pred_deps->pending_jump_insns;
2691 
2692   /* Can't allow these to be freed twice.  */
2693   pred_deps->pending_read_insns = 0;
2694   pred_deps->pending_read_mems = 0;
2695   pred_deps->pending_write_insns = 0;
2696   pred_deps->pending_write_mems = 0;
2697   pred_deps->pending_jump_insns = 0;
2698 }
2699 
2700 /* Compute dependences inside bb.  In a multiple blocks region:
2701    (1) a bb is analyzed after its predecessors, and (2) the lists in
2702    effect at the end of bb (after analyzing for bb) are inherited by
2703    bb's successors.
2704 
2705    Specifically for reg-reg data dependences, the block insns are
2706    scanned by sched_analyze () top-to-bottom.  Three lists are
2707    maintained by sched_analyze (): reg_last[].sets for register DEFs,
2708    reg_last[].implicit_sets for implicit hard register DEFs, and
2709    reg_last[].uses for register USEs.
2710 
2711    When analysis is completed for bb, we update for its successors:
2712    ;  - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
2713    ;  - IMPLICIT_DEFS[succ] = Union (IMPLICIT_DEFS [succ], IMPLICIT_DEFS [bb])
2714    ;  - USES[succ] = Union (USES [succ], DEFS [bb])
2715 
2716    The mechanism for computing mem-mem data dependence is very
2717    similar, and the result is interblock dependences in the region.  */
2718 
2719 static void
2720 compute_block_dependences (int bb)
2721 {
2722   rtx_insn *head, *tail;
2723   struct deps_desc tmp_deps;
2724 
2725   tmp_deps = bb_deps[bb];
2726 
2727   /* Do the analysis for this block.  */
2728   gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2729   get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2730 
2731   sched_analyze (&tmp_deps, head, tail);
2732 
2733   add_branch_dependences (head, tail);
2734 
2735   if (current_nr_blocks > 1)
2736     propagate_deps (bb, &tmp_deps);
2737 
2738   /* Free up the INSN_LISTs.  */
2739   free_deps (&tmp_deps);
2740 
2741   if (targetm.sched.dependencies_evaluation_hook)
2742     targetm.sched.dependencies_evaluation_hook (head, tail);
2743 }
2744 
2745 /* Free dependencies of instructions inside BB.  */
2746 static void
2747 free_block_dependencies (int bb)
2748 {
2749   rtx_insn *head;
2750   rtx_insn *tail;
2751 
2752   get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2753 
2754   if (no_real_insns_p (head, tail))
2755     return;
2756 
2757   sched_free_deps (head, tail, true);
2758 }
2759 
2760 /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
2761    them to the unused_*_list variables, so that they can be reused.  */
2762 
2763 static void
2764 free_pending_lists (void)
2765 {
2766   int bb;
2767 
2768   for (bb = 0; bb < current_nr_blocks; bb++)
2769     {
2770       free_INSN_LIST_list (&bb_deps[bb].pending_read_insns);
2771       free_INSN_LIST_list (&bb_deps[bb].pending_write_insns);
2772       free_EXPR_LIST_list (&bb_deps[bb].pending_read_mems);
2773       free_EXPR_LIST_list (&bb_deps[bb].pending_write_mems);
2774       free_INSN_LIST_list (&bb_deps[bb].pending_jump_insns);
2775     }
2776 }
2777 
2778 /* Print dependences for debugging starting from FROM_BB.
2779    Callable from debugger.  */
2782 DEBUG_FUNCTION void
2783 debug_rgn_dependencies (int from_bb)
2784 {
2785   int bb;
2786 
2787   fprintf (sched_dump,
2788 	   ";;   --------------- forward dependences: ------------ \n");
2789 
2790   for (bb = from_bb; bb < current_nr_blocks; bb++)
2791     {
2792       rtx_insn *head, *tail;
2793 
2794       get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2795       fprintf (sched_dump, "\n;;   --- Region Dependences --- b %d bb %d \n",
2796 	       BB_TO_BLOCK (bb), bb);
2797 
2798       debug_dependencies (head, tail);
2799     }
2800 }
2801 
2802 /* Print dependencies information for instructions between HEAD and TAIL.
2803    ??? This function would probably fit best in haifa-sched.c.  */
2804 void debug_dependencies (rtx_insn *head, rtx_insn *tail)
2805 {
2806   rtx_insn *insn;
2807   rtx_insn *next_tail = NEXT_INSN (tail);
2808 
2809   fprintf (sched_dump, ";;   %7s%6s%6s%6s%6s%6s%14s\n",
2810 	   "insn", "code", "bb", "dep", "prio", "cost",
2811 	   "reservation");
2812   fprintf (sched_dump, ";;   %7s%6s%6s%6s%6s%6s%14s\n",
2813 	   "----", "----", "--", "---", "----", "----",
2814 	   "-----------");
2815 
2816   for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2817     {
2818       if (! INSN_P (insn))
2819 	{
2820 	  int n;
2821 	  fprintf (sched_dump, ";;   %6d ", INSN_UID (insn));
2822 	  if (NOTE_P (insn))
2823 	    {
2824 	      n = NOTE_KIND (insn);
2825 	      fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
2826 	    }
2827 	  else
2828 	    fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
2829 	  continue;
2830 	}
2831 
2832       fprintf (sched_dump,
2833 	       ";;   %s%5d%6d%6d%6d%6d%6d   ",
2834 	       (SCHED_GROUP_P (insn) ? "+" : " "),
2835 	       INSN_UID (insn),
2836 	       INSN_CODE (insn),
2837 	       BLOCK_NUM (insn),
2838 	       sched_emulate_haifa_p ? -1 : sd_lists_size (insn, SD_LIST_BACK),
2839 	       (sel_sched_p () ? (sched_emulate_haifa_p ? -1
2840 			       : INSN_PRIORITY (insn))
2841 		: INSN_PRIORITY (insn)),
2842 	       (sel_sched_p () ? (sched_emulate_haifa_p ? -1
2843 			       : insn_sched_cost (insn))
2844 		: insn_sched_cost (insn)));
2845 
2846       if (recog_memoized (insn) < 0)
2847 	fprintf (sched_dump, "nothing");
2848       else
2849 	print_reservation (sched_dump, insn);
2850 
2851       fprintf (sched_dump, "\t: ");
2852       {
2853 	sd_iterator_def sd_it;
2854 	dep_t dep;
2855 
2856 	FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
2857 	  fprintf (sched_dump, "%d%s%s ", INSN_UID (DEP_CON (dep)),
2858 		   DEP_NONREG (dep) ? "n" : "",
2859 		   DEP_MULTIPLE (dep) ? "m" : "");
2860       }
2861       fprintf (sched_dump, "\n");
2862     }
2863 
2864   fprintf (sched_dump, "\n");
2865 }
2866 
2867 /* Dump dependency graph for the current region to a file using dot syntax.  */
2868 
2869 void
2870 dump_rgn_dependencies_dot (FILE *file)
2871 {
2872   rtx_insn *head, *tail, *con, *pro;
2873   sd_iterator_def sd_it;
2874   dep_t dep;
2875   int bb;
2876   pretty_printer pp;
2877 
2878   pp.buffer->stream = file;
2879   pp_printf (&pp, "digraph SchedDG {\n");
2880 
2881   for (bb = 0; bb < current_nr_blocks; ++bb)
2882     {
2883       /* Begin subgraph (basic block).  */
2884       pp_printf (&pp, "subgraph cluster_block_%d {\n", bb);
2885       pp_printf (&pp, "\t" "color=blue;" "\n");
2886       pp_printf (&pp, "\t" "style=bold;" "\n");
2887       pp_printf (&pp, "\t" "label=\"BB #%d\";\n", BB_TO_BLOCK (bb));
2888 
2889       /* Setup head and tail (no support for EBBs).  */
2890       gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2891       get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2892       tail = NEXT_INSN (tail);
2893 
2894       /* Dump all insns.  */
2895       for (con = head; con != tail; con = NEXT_INSN (con))
2896 	{
2897 	  if (!INSN_P (con))
2898 	    continue;
2899 
2900 	  /* Pretty print the insn.  */
2901 	  pp_printf (&pp, "\t%d [label=\"{", INSN_UID (con));
2902 	  pp_write_text_to_stream (&pp);
2903 	  print_insn (&pp, con, /*verbose=*/false);
2904 	  pp_write_text_as_dot_label_to_stream (&pp, /*for_record=*/true);
2905 	  pp_write_text_to_stream (&pp);
2906 
2907 	  /* Dump instruction attributes.  */
2908 	  pp_printf (&pp, "|{ uid:%d | luid:%d | prio:%d }}\",shape=record]\n",
2909 		     INSN_UID (con), INSN_LUID (con), INSN_PRIORITY (con));
2910 
2911 	  /* Dump all deps.  */
2912 	  FOR_EACH_DEP (con, SD_LIST_BACK, sd_it, dep)
2913 	    {
2914 	      int weight = 0;
2915 	      const char *color;
2916 	      pro = DEP_PRO (dep);
2917 
2918 	      switch (DEP_TYPE (dep))
2919 		{
2920 		case REG_DEP_TRUE:
2921 		  color = "black";
2922 		  weight = 1;
2923 		  break;
2924 		case REG_DEP_OUTPUT:
2925 		case REG_DEP_ANTI:
2926 		  color = "orange";
2927 		  break;
2928 		case REG_DEP_CONTROL:
2929 		  color = "blue";
2930 		  break;
2931 		default:
2932 		  gcc_unreachable ();
2933 		}
2934 
2935 	      pp_printf (&pp, "\t%d -> %d [color=%s",
2936 			 INSN_UID (pro), INSN_UID (con), color);
2937 	      if (int cost = dep_cost (dep))
2938 		pp_printf (&pp, ",label=%d", cost);
2939 	      pp_printf (&pp, ",weight=%d", weight);
2940 	      pp_printf (&pp, "];\n");
2941 	    }
2942 	}
2943       pp_printf (&pp, "}\n");
2944     }
2945 
2946   pp_printf (&pp, "}\n");
2947   pp_flush (&pp);
2948 }
2949 
2950 /* Dump dependency graph for the current region to a file using dot syntax.  */
2951 
2952 DEBUG_FUNCTION void
2953 dump_rgn_dependencies_dot (const char *fname)
2954 {
2955   FILE *fp;
2956 
2957   fp = fopen (fname, "w");
2958   if (!fp)
2959     {
2960       perror ("fopen");
2961       return;
2962     }
2963 
2964   dump_rgn_dependencies_dot (fp);
2965   fclose (fp);
2966 }
2967 
2968 
2969 /* Returns true if all the basic blocks of the current region have the
2970    BB_DISABLE_SCHEDULE flag set, which means the region must not be scheduled.  */
2971 bool
2972 sched_is_disabled_for_current_region_p (void)
2973 {
2974   int bb;
2975 
2976   for (bb = 0; bb < current_nr_blocks; bb++)
2977     if (!(BASIC_BLOCK_FOR_FN (cfun,
2978 			      BB_TO_BLOCK (bb))->flags & BB_DISABLE_SCHEDULE))
2979       return false;
2980 
2981   return true;
2982 }
2983 
2984 /* Free all region dependencies saved in INSN_BACK_DEPS and
2985    INSN_RESOLVED_BACK_DEPS.  The Haifa scheduler does this on the fly
2986    when scheduling, so this function is supposed to be called from
2987    the selective scheduling only.  */
2988 void
2989 free_rgn_deps (void)
2990 {
2991   int bb;
2992 
2993   for (bb = 0; bb < current_nr_blocks; bb++)
2994     {
2995       rtx_insn *head, *tail;
2996 
2997       gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2998       get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2999 
3000       sched_free_deps (head, tail, false);
3001     }
3002 }
3003 
3004 static int rgn_n_insns;
3005 
3006 /* Compute insn priority for a current region.  */
3007 void
3008 compute_priorities (void)
3009 {
3010   int bb;
3011 
3012   current_sched_info->sched_max_insns_priority = 0;
3013   for (bb = 0; bb < current_nr_blocks; bb++)
3014     {
3015       rtx_insn *head, *tail;
3016 
3017       gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
3018       get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
3019 
3020       if (no_real_insns_p (head, tail))
3021 	continue;
3022 
3023       rgn_n_insns += set_priorities (head, tail);
3024     }
3025   current_sched_info->sched_max_insns_priority++;
3026 }
3027 
3028 /* (Re-)initialize the arrays of DFA states at the end of each basic block.
3029 
3030    SAVED_LAST_BASIC_BLOCK is the previous length of the arrays.  It must be
3031    zero for the first call to this function, to allocate the arrays for the
3032    first time.
3033 
3034    This function is called once during initialization of the scheduler, and
3035    called again to resize the arrays if new basic blocks have been created,
3036    for example for speculation recovery code.  */
3037 
3038 static void
3039 realloc_bb_state_array (int saved_last_basic_block)
3040 {
3041   char *old_bb_state_array = bb_state_array;
3042   size_t lbb = (size_t) last_basic_block_for_fn (cfun);
3043   size_t slbb = (size_t) saved_last_basic_block;
3044 
3045   /* Nothing to do if nothing changed since the last time this was called.  */
3046   if (saved_last_basic_block == last_basic_block_for_fn (cfun))
3047     return;
3048 
3049   /* The selective scheduler doesn't use the state arrays.  */
3050   if (sel_sched_p ())
3051     {
3052       gcc_assert (bb_state_array == NULL && bb_state == NULL);
3053       return;
3054     }
3055 
3056   gcc_checking_assert (saved_last_basic_block == 0
3057 		       || (bb_state_array != NULL && bb_state != NULL));
3058 
3059   bb_state_array = XRESIZEVEC (char, bb_state_array, lbb * dfa_state_size);
3060   bb_state = XRESIZEVEC (state_t, bb_state, lbb);
3061 
3062   /* If BB_STATE_ARRAY has moved, fix up all entries of the state
3063      pointer array.  Otherwise only fix up the newly allocated ones.
3064      For the state array itself, only initialize the new entries.  */
3065   bool bb_state_array_moved = (bb_state_array != old_bb_state_array);
3066   for (size_t i = bb_state_array_moved ? 0 : slbb; i < lbb; i++)
3067     bb_state[i] = (state_t) (bb_state_array + i * dfa_state_size);
3068   for (size_t i = slbb; i < lbb; i++)
3069     state_reset (bb_state[i]);
3070 }
3071 
3072 /* Free the arrays of DFA states at the end of each basic block.  */
3073 
3074 static void
3075 free_bb_state_array (void)
3076 {
3077   free (bb_state_array);
3078   free (bb_state);
3079   bb_state_array = NULL;
3080   bb_state = NULL;
3081 }
3082 
3083 /* Schedule a region.  A region is either an inner loop, a loop-free
3084    subroutine, or a single basic block.  Each bb in the region is
3085    scheduled after its flow predecessors.  */
3086 
3087 static void
3088 schedule_region (int rgn)
3089 {
3090   int bb;
3091   int sched_rgn_n_insns = 0;
3092 
3093   rgn_n_insns = 0;
3094 
3095   /* Do not support register pressure sensitive scheduling for the new regions
3096      as we don't update the liveness info for them.  */
3097   if (sched_pressure != SCHED_PRESSURE_NONE
3098       && rgn >= nr_regions_initial)
3099     {
3100       free_global_sched_pressure_data ();
3101       sched_pressure = SCHED_PRESSURE_NONE;
3102     }
3103 
3104   rgn_setup_region (rgn);
3105 
3106   /* Don't schedule a region whose blocks are all marked with
3107      BB_DISABLE_SCHEDULE.  */
3108   if (sched_is_disabled_for_current_region_p ())
3109     return;
3110 
3111   sched_rgn_compute_dependencies (rgn);
3112 
3113   sched_rgn_local_init (rgn);
3114 
3115   /* Set priorities.  */
3116   compute_priorities ();
3117 
3118   sched_extend_ready_list (rgn_n_insns);
3119 
3120   if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
3121     {
3122       sched_init_region_reg_pressure_info ();
3123       for (bb = 0; bb < current_nr_blocks; bb++)
3124 	{
3125 	  basic_block first_bb, last_bb;
3126 	  rtx_insn *head, *tail;
3127 
3128 	  first_bb = EBB_FIRST_BB (bb);
3129 	  last_bb = EBB_LAST_BB (bb);
3130 
3131 	  get_ebb_head_tail (first_bb, last_bb, &head, &tail);
3132 
3133 	  if (no_real_insns_p (head, tail))
3134 	    {
3135 	      gcc_assert (first_bb == last_bb);
3136 	      continue;
3137 	    }
3138 	  sched_setup_bb_reg_pressure_info (first_bb, PREV_INSN (head));
3139 	}
3140     }
3141 
3142   /* Now we can schedule all blocks.  */
3143   for (bb = 0; bb < current_nr_blocks; bb++)
3144     {
3145       basic_block first_bb, last_bb, curr_bb;
3146       rtx_insn *head, *tail;
3147 
3148       first_bb = EBB_FIRST_BB (bb);
3149       last_bb = EBB_LAST_BB (bb);
3150 
3151       get_ebb_head_tail (first_bb, last_bb, &head, &tail);
3152 
3153       if (no_real_insns_p (head, tail))
3154 	{
3155 	  gcc_assert (first_bb == last_bb);
3156 	  continue;
3157 	}
3158 
3159       current_sched_info->prev_head = PREV_INSN (head);
3160       current_sched_info->next_tail = NEXT_INSN (tail);
3161 
3162       remove_notes (head, tail);
3163 
3164       unlink_bb_notes (first_bb, last_bb);
3165 
3166       target_bb = bb;
3167 
3168       gcc_assert (flag_schedule_interblock || current_nr_blocks == 1);
3169       current_sched_info->queue_must_finish_empty = current_nr_blocks == 1;
3170 
3171       curr_bb = first_bb;
3172       if (dbg_cnt (sched_block))
3173         {
3174 	  edge f;
3175 	  int saved_last_basic_block = last_basic_block_for_fn (cfun);
3176 
3177 	  schedule_block (&curr_bb, bb_state[first_bb->index]);
3178 	  gcc_assert (EBB_FIRST_BB (bb) == first_bb);
3179 	  sched_rgn_n_insns += sched_n_insns;
3180 	  realloc_bb_state_array (saved_last_basic_block);
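	  /* If the fallthru edge is probable enough, seed the DFA state
	     of the successor block with the state at the end of this
	     block.  */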
3181 	  f = find_fallthru_edge (last_bb->succs);
3182 	  if (f
3183 	      && (!f->probability.initialized_p ()
3184 		  || f->probability.to_reg_br_prob_base () * 100 / REG_BR_PROB_BASE >=
3185 	             PARAM_VALUE (PARAM_SCHED_STATE_EDGE_PROB_CUTOFF)))
3186 	    {
3187 	      memcpy (bb_state[f->dest->index], curr_state,
3188 		      dfa_state_size);
3189 	      if (sched_verbose >= 5)
3190 		fprintf (sched_dump, "saving state for edge %d->%d\n",
3191 			 f->src->index, f->dest->index);
3192 	    }
3193         }
3194       else
3195         {
3196           sched_rgn_n_insns += rgn_n_insns;
3197         }
3198 
3199       /* Clean up.  */
3200       if (current_nr_blocks > 1)
3201 	free_trg_info ();
3202     }
3203 
3204   /* Sanity check: verify that all region insns were scheduled.  */
3205   gcc_assert (sched_rgn_n_insns == rgn_n_insns);
3206 
3207   sched_finish_ready_list ();
3208 
3209   /* Done with this region.  */
3210   sched_rgn_local_finish ();
3211 
3212   /* Free dependencies.  */
3213   for (bb = 0; bb < current_nr_blocks; ++bb)
3214     free_block_dependencies (bb);
3215 
3216   gcc_assert (haifa_recovery_bb_ever_added_p
3217 	      || deps_pools_are_empty_p ());
3218 }
3219 
3220 /* Initialize data structures for region scheduling.  */
3221 
3222 void
3223 sched_rgn_init (bool single_blocks_p)
3224 {
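  /* Convert the percentage value of PARAM_MIN_SPEC_PROB to the
     REG_BR_PROB_BASE scale.  */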
3225   min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE)
3226 		    / 100);
3227 
3228   nr_inter = 0;
3229   nr_spec = 0;
3230 
3231   extend_regions ();
3232 
3233   CONTAINING_RGN (ENTRY_BLOCK) = -1;
3234   CONTAINING_RGN (EXIT_BLOCK) = -1;
3235 
3236   realloc_bb_state_array (0);
3237 
3238   /* Compute regions for scheduling.  */
3239   if (single_blocks_p
3240       || n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS + 1
3241       || !flag_schedule_interblock
3242       || is_cfg_nonregular ())
3243     {
3244       find_single_block_region (sel_sched_p ());
3245     }
3246   else
3247     {
3248       /* Compute the dominators and post dominators.  */
3249       if (!sel_sched_p ())
3250 	calculate_dominance_info (CDI_DOMINATORS);
3251 
3252       /* Find regions.  */
3253       find_rgns ();
3254 
3255       if (sched_verbose >= 3)
3256 	debug_regions ();
3257 
3258       /* For now.  This will move as more and more of haifa is converted
3259 	 to using the cfg code.  */
3260       if (!sel_sched_p ())
3261 	free_dominance_info (CDI_DOMINATORS);
3262     }
3263 
3264   gcc_assert (nr_regions > 0 && nr_regions <= n_basic_blocks_for_fn (cfun));
3265 
3266   RGN_BLOCKS (nr_regions) = (RGN_BLOCKS (nr_regions - 1)
3267 			     + RGN_NR_BLOCKS (nr_regions - 1));
3268   nr_regions_initial = nr_regions;
3269 }
3270 
3271 /* Free data structures for region scheduling.  */
3272 void
3273 sched_rgn_finish (void)
3274 {
3275   free_bb_state_array ();
3276 
3277   /* Reposition the prologue and epilogue notes in case we moved the
3278      prologue/epilogue insns.  */
3279   if (reload_completed)
3280     reposition_prologue_and_epilogue_notes ();
3281 
3282   if (sched_verbose)
3283     {
3284       if (reload_completed == 0
3285 	  && flag_schedule_interblock)
3286 	{
3287 	  fprintf (sched_dump,
3288 		   "\n;; Procedure interblock/speculative motions == %d/%d \n",
3289 		   nr_inter, nr_spec);
3290 	}
3291       else
3292 	gcc_assert (nr_inter <= 0);
3293       fprintf (sched_dump, "\n\n");
3294     }
3295 
3296   nr_regions = 0;
3297 
3298   free (rgn_table);
3299   rgn_table = NULL;
3300 
3301   free (rgn_bb_table);
3302   rgn_bb_table = NULL;
3303 
3304   free (block_to_bb);
3305   block_to_bb = NULL;
3306 
3307   free (containing_rgn);
3308   containing_rgn = NULL;
3309 
3310   free (ebb_head);
3311   ebb_head = NULL;
3312 }
3313 
3314 /* Setup global variables like CURRENT_BLOCKS and CURRENT_NR_BLOCK to
3315    point to the region RGN.  */
3316 void
3317 rgn_setup_region (int rgn)
3318 {
3319   int bb;
3320 
3321   /* Set variables for the current region.  */
3322   current_nr_blocks = RGN_NR_BLOCKS (rgn);
3323   current_blocks = RGN_BLOCKS (rgn);
3324 
3325   /* EBB_HEAD is a region-scope structure.  But we realloc it for
3326      each region to save time/memory/something else.
3327      See comments in add_block1, for what reasons we allocate +1 element.  */
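  /* As an illustration (with made-up numbers): if CURRENT_BLOCKS is 10 and
     the region contains 3 blocks, the loop below initializes ebb_head to
     {10, 11, 12, 13}; the extra trailing element makes the end of the last
     ebb easy to find.  */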
  ebb_head = XRESIZEVEC (int, ebb_head, current_nr_blocks + 1);
  for (bb = 0; bb <= current_nr_blocks; bb++)
    ebb_head[bb] = current_blocks + bb;
}

/* Compute instruction dependencies in region RGN.  */

void
sched_rgn_compute_dependencies (int rgn)
{
  if (!RGN_DONT_CALC_DEPS (rgn))
    {
      int bb;

      if (sel_sched_p ())
        sched_emulate_haifa_p = 1;

      init_deps_global ();

      /* Initializations for region data dependence analysis.  */
      bb_deps = XNEWVEC (struct deps_desc, current_nr_blocks);
      for (bb = 0; bb < current_nr_blocks; bb++)
        init_deps (bb_deps + bb, false);

      /* Initialize bitmap used in add_branch_dependences.  */
      insn_referenced = sbitmap_alloc (sched_max_luid);
      bitmap_clear (insn_referenced);

      /* Compute backward dependencies.  */
      for (bb = 0; bb < current_nr_blocks; bb++)
        compute_block_dependences (bb);

      sbitmap_free (insn_referenced);
      free_pending_lists ();
      finish_deps_global ();
      free (bb_deps);

      /* We don't want to recalculate this twice.  */
      RGN_DONT_CALC_DEPS (rgn) = 1;

      if (sel_sched_p ())
        sched_emulate_haifa_p = 0;
    }
  else
    /* Either this is a recovery block (recovery blocks are always
       single-block regions), or selective scheduling is in use.  */
    gcc_assert (current_nr_blocks == 1 || sel_sched_p ());
}

/* Initialize the data structures needed to schedule region RGN.  */

void
sched_rgn_local_init (int rgn)
{
  int bb;

  /* Compute interblock info: probabilities, split-edges, dominators, etc.  */
  if (current_nr_blocks > 1)
    {
      basic_block block;
      edge e;
      edge_iterator ei;

      prob = XNEWVEC (int, current_nr_blocks);

      dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
      bitmap_vector_clear (dom, current_nr_blocks);

      /* Use ->aux to implement EDGE_TO_BIT mapping.  */
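      /* The first walk below only numbers the edges: every successor edge
         of a block in this region gets the next consecutive bit index,
         stored in e->aux via SET_EDGE_TO_BIT.  The second walk records the
         edges themselves in rgn_edges under the same numbering.  */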
      rgn_nr_edges = 0;
      FOR_EACH_BB_FN (block, cfun)
        {
          if (CONTAINING_RGN (block->index) != rgn)
            continue;
          FOR_EACH_EDGE (e, ei, block->succs)
            SET_EDGE_TO_BIT (e, rgn_nr_edges++);
        }

      rgn_edges = XNEWVEC (edge, rgn_nr_edges);
      rgn_nr_edges = 0;
      FOR_EACH_BB_FN (block, cfun)
        {
          if (CONTAINING_RGN (block->index) != rgn)
            continue;
          FOR_EACH_EDGE (e, ei, block->succs)
            rgn_edges[rgn_nr_edges++] = e;
        }

      /* Split edges.  */
      pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
      bitmap_vector_clear (pot_split, current_nr_blocks);
      ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
      bitmap_vector_clear (ancestor_edges, current_nr_blocks);

      /* Compute probabilities, dominators, split_edges.  */
      for (bb = 0; bb < current_nr_blocks; bb++)
        compute_dom_prob_ps (bb);

      /* Cleanup ->aux used for EDGE_TO_BIT mapping.  */
      /* We don't need them anymore.  But we want to avoid duplication of
         aux fields in the newly created edges.  */
      FOR_EACH_BB_FN (block, cfun)
        {
          if (CONTAINING_RGN (block->index) != rgn)
            continue;
          FOR_EACH_EDGE (e, ei, block->succs)
            e->aux = NULL;
        }
    }
}

/* Free data computed for the finished region.  */

void
sched_rgn_local_free (void)
{
  free (prob);
  sbitmap_vector_free (dom);
  sbitmap_vector_free (pot_split);
  sbitmap_vector_free (ancestor_edges);
  free (rgn_edges);
}

/* Finish with the data computed for the finished region: free it unless
   the region was a single block (in which case nothing was allocated) or
   selective scheduling is in use.  */

void
sched_rgn_local_finish (void)
{
  if (current_nr_blocks > 1 && !sel_sched_p ())
    {
      sched_rgn_local_free ();
    }
}

/* Setup scheduler infos.  */

void
rgn_setup_common_sched_info (void)
{
  memcpy (&rgn_common_sched_info, &haifa_common_sched_info,
          sizeof (rgn_common_sched_info));

  rgn_common_sched_info.fix_recovery_cfg = rgn_fix_recovery_cfg;
  rgn_common_sched_info.add_block = rgn_add_block;
  rgn_common_sched_info.estimate_number_of_insns
    = rgn_estimate_number_of_insns;
  rgn_common_sched_info.sched_pass_id = SCHED_RGN_PASS;

  common_sched_info = &rgn_common_sched_info;
}

/* Setup all *_sched_info structures (for the Haifa frontend
   and for the dependence analysis) in the interblock scheduler.  */

void
rgn_setup_sched_infos (void)
{
  if (!sel_sched_p ())
    memcpy (&rgn_sched_deps_info, &rgn_const_sched_deps_info,
            sizeof (rgn_sched_deps_info));
  else
    memcpy (&rgn_sched_deps_info, &rgn_const_sel_sched_deps_info,
            sizeof (rgn_sched_deps_info));

  sched_deps_info = &rgn_sched_deps_info;

  memcpy (&rgn_sched_info, &rgn_const_sched_info, sizeof (rgn_sched_info));
  current_sched_info = &rgn_sched_info;
}

/* The one entry point in this file.  */

void
schedule_insns (void)
{
  int rgn;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return;

  rgn_setup_common_sched_info ();
  rgn_setup_sched_infos ();

  haifa_sched_init ();
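  /* In the post-reload pass, reload_completed is nonzero, so this asks
     sched_rgn_init for single-block regions only.  */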
  sched_rgn_init (reload_completed);

  bitmap_initialize (&not_in_df, &bitmap_default_obstack);

  /* Schedule every region in the subroutine.  */
  for (rgn = 0; rgn < nr_regions; rgn++)
    if (dbg_cnt (sched_region))
      schedule_region (rgn);

  /* Clean up.  */
  sched_rgn_finish ();
  bitmap_release (&not_in_df);

  haifa_sched_finish ();
}

/* INSN has been added to/removed from the current region.  */

static void
rgn_add_remove_insn (rtx_insn *insn, int remove_p)
{
  if (!remove_p)
    rgn_n_insns++;
  else
    rgn_n_insns--;

  if (INSN_BB (insn) == target_bb)
    {
      if (!remove_p)
        target_n_insns++;
      else
        target_n_insns--;
    }
}

/* Extend the internal data structures so that they can describe every
   basic block of CFUN.  */

void
extend_regions (void)
{
  rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks_for_fn (cfun));
  rgn_bb_table = XRESIZEVEC (int, rgn_bb_table,
                             n_basic_blocks_for_fn (cfun));
  block_to_bb = XRESIZEVEC (int, block_to_bb,
                            last_basic_block_for_fn (cfun));
  containing_rgn = XRESIZEVEC (int, containing_rgn,
                               last_basic_block_for_fn (cfun));
}

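/* Make a new single-block region consisting of the newly added block BB.  */
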
void
rgn_make_new_region_out_of_new_block (basic_block bb)
{
  int i;

  i = RGN_BLOCKS (nr_regions);
  /* I - first free position in rgn_bb_table.  */

  rgn_bb_table[i] = bb->index;
  RGN_NR_BLOCKS (nr_regions) = 1;
  RGN_HAS_REAL_EBB (nr_regions) = 0;
  RGN_DONT_CALC_DEPS (nr_regions) = 0;
  CONTAINING_RGN (bb->index) = nr_regions;
  BLOCK_TO_BB (bb->index) = 0;

  nr_regions++;

  RGN_BLOCKS (nr_regions) = i + 1;
}

/* BB was added to ebb after AFTER.  */

static void
rgn_add_block (basic_block bb, basic_block after)
{
  extend_regions ();
  bitmap_set_bit (&not_in_df, bb->index);

  if (after == 0 || after == EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      rgn_make_new_region_out_of_new_block (bb);
      RGN_DONT_CALC_DEPS (nr_regions - 1) = (after
                                             == EXIT_BLOCK_PTR_FOR_FN (cfun));
    }
  else
    {
      int i, pos;

      /* We need to fix rgn_table, block_to_bb, containing_rgn
         and ebb_head.  */

      BLOCK_TO_BB (bb->index) = BLOCK_TO_BB (after->index);

      /* We extend ebb_head to one more position to
         easily find the last position of the last ebb in
         the current region.  Thus, ebb_head[BLOCK_TO_BB (after) + 1]
         is _always_ valid for access.  */

      i = BLOCK_TO_BB (after->index) + 1;
      pos = ebb_head[i] - 1;
      /* Now POS is the index of the last block in the region.  */

      /* Find index of basic block AFTER.  */
      for (; rgn_bb_table[pos] != after->index; pos--)
        ;

      pos++;
      gcc_assert (pos > ebb_head[i - 1]);

      /* i - ebb right after "AFTER".  */
      /* ebb_head[i] - VALID.  */

      /* Source position: ebb_head[i]
         Destination position: ebb_head[i] + 1
         Last position:
           RGN_BLOCKS (nr_regions) - 1
         Number of elements to copy: (last_position) - (source_position) + 1
       */
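      /* A worked example with made-up indices: if the tail of rgn_bb_table
         is {A, AFTER, B, C} and POS now indexes B, the memmove below shifts
         {B, C} one slot to the right and the assignment that follows yields
         {A, AFTER, BB, B, C}.  */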

      memmove (rgn_bb_table + pos + 1,
               rgn_bb_table + pos,
               ((RGN_BLOCKS (nr_regions) - 1) - (pos) + 1)
               * sizeof (*rgn_bb_table));

      rgn_bb_table[pos] = bb->index;

      for (; i <= current_nr_blocks; i++)
        ebb_head[i]++;

      i = CONTAINING_RGN (after->index);
      CONTAINING_RGN (bb->index) = i;

      RGN_HAS_REAL_EBB (i) = 1;

      for (++i; i <= nr_regions; i++)
        RGN_BLOCKS (i)++;
    }
}

/* Fix internal data after interblock movement of a jump instruction.
   For the parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */

static void
rgn_fix_recovery_cfg (int bbi, int check_bbi, int check_bb_nexti)
{
  int old_pos, new_pos, i;

  BLOCK_TO_BB (check_bb_nexti) = BLOCK_TO_BB (bbi);

  for (old_pos = ebb_head[BLOCK_TO_BB (check_bbi) + 1] - 1;
       rgn_bb_table[old_pos] != check_bb_nexti;
       old_pos--)
    ;
  gcc_assert (old_pos > ebb_head[BLOCK_TO_BB (check_bbi)]);

  for (new_pos = ebb_head[BLOCK_TO_BB (bbi) + 1] - 1;
       rgn_bb_table[new_pos] != bbi;
       new_pos--)
    ;
  new_pos++;
  gcc_assert (new_pos > ebb_head[BLOCK_TO_BB (bbi)]);

  gcc_assert (new_pos < old_pos);

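  /* Slide the entries between NEW_POS and OLD_POS one slot to the right;
     CHECK_BB_NEXTI (formerly at OLD_POS, overwritten by the shift) is then
     re-inserted at NEW_POS, i.e. directly after BBI.  */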
  memmove (rgn_bb_table + new_pos + 1,
           rgn_bb_table + new_pos,
           (old_pos - new_pos) * sizeof (*rgn_bb_table));

  rgn_bb_table[new_pos] = check_bb_nexti;

  for (i = BLOCK_TO_BB (bbi) + 1; i <= BLOCK_TO_BB (check_bbi); i++)
    ebb_head[i]++;
}

/* Return the next block in the ebb chain.  For the parameter meaning
   please refer to sched-int.h: struct sched_info: advance_target_bb.  */

static basic_block
advance_target_bb (basic_block bb, rtx_insn *insn)
{
  if (insn)
    return 0;

  gcc_assert (BLOCK_TO_BB (bb->index) == target_bb
              && BLOCK_TO_BB (bb->next_bb->index) == target_bb);
  return bb->next_bb;
}

#endif

/* Run the instruction scheduler with live-range shrinkage enabled,
   i.e. with interblock scheduling temporarily disabled.  */

static unsigned int
rest_of_handle_live_range_shrinkage (void)
{
#ifdef INSN_SCHEDULING
  int saved;

  initialize_live_range_shrinkage ();
  saved = flag_schedule_interblock;
  flag_schedule_interblock = false;
  schedule_insns ();
  flag_schedule_interblock = saved;
  finish_live_range_shrinkage ();
#endif
  return 0;
}

/* Run the instruction scheduler.  */

static unsigned int
rest_of_handle_sched (void)
{
#ifdef INSN_SCHEDULING
  if (flag_selective_scheduling
      && ! maybe_skip_selective_scheduling ())
    run_selective_scheduling ();
  else
    schedule_insns ();
#endif
  return 0;
}

/* Run second scheduling pass after reload.  */

static unsigned int
rest_of_handle_sched2 (void)
{
#ifdef INSN_SCHEDULING
  if (flag_selective_scheduling2
      && ! maybe_skip_selective_scheduling ())
    run_selective_scheduling ();
  else
    {
      /* Do control and data sched analysis again,
         and write some more of the results to dump file.  */
      if (flag_sched2_use_superblocks)
        schedule_ebbs ();
      else
        schedule_insns ();
    }
#endif
  return 0;
}

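/* Run the instruction scheduler in sched_fusion mode; the actual fusion
   is done later by peephole2 (see pass_sched_fusion::gate below).  */
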
static unsigned int
rest_of_handle_sched_fusion (void)
{
#ifdef INSN_SCHEDULING
  sched_fusion = true;
  schedule_insns ();
  sched_fusion = false;
#endif
  return 0;
}

namespace {

const pass_data pass_data_live_range_shrinkage =
{
  RTL_PASS, /* type */
  "lr_shrinkage", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_LIVE_RANGE_SHRINKAGE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_live_range_shrinkage : public rtl_opt_pass
{
public:
  pass_live_range_shrinkage (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_live_range_shrinkage, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
#ifdef INSN_SCHEDULING
      return flag_live_range_shrinkage;
#else
      return 0;
#endif
    }

  virtual unsigned int execute (function *)
    {
      return rest_of_handle_live_range_shrinkage ();
    }

}; // class pass_live_range_shrinkage

} // anon namespace

rtl_opt_pass *
make_pass_live_range_shrinkage (gcc::context *ctxt)
{
  return new pass_live_range_shrinkage (ctxt);
}

namespace {

const pass_data pass_data_sched =
{
  RTL_PASS, /* type */
  "sched1", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_SCHED, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_sched : public rtl_opt_pass
{
public:
  pass_sched (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_sched, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return rest_of_handle_sched (); }

}; // class pass_sched

bool
pass_sched::gate (function *)
{
#ifdef INSN_SCHEDULING
  return optimize > 0 && flag_schedule_insns && dbg_cnt (sched_func);
#else
  return 0;
#endif
}

} // anon namespace

rtl_opt_pass *
make_pass_sched (gcc::context *ctxt)
{
  return new pass_sched (ctxt);
}

namespace {

const pass_data pass_data_sched2 =
{
  RTL_PASS, /* type */
  "sched2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_SCHED2, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_sched2 : public rtl_opt_pass
{
public:
  pass_sched2 (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_sched2, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *)
    {
      return rest_of_handle_sched2 ();
    }

}; // class pass_sched2

bool
pass_sched2::gate (function *)
{
#ifdef INSN_SCHEDULING
  return optimize > 0 && flag_schedule_insns_after_reload
    && !targetm.delay_sched2 && dbg_cnt (sched2_func);
#else
  return 0;
#endif
}

} // anon namespace

rtl_opt_pass *
make_pass_sched2 (gcc::context *ctxt)
{
  return new pass_sched2 (ctxt);
}

namespace {

const pass_data pass_data_sched_fusion =
{
  RTL_PASS, /* type */
  "sched_fusion", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_SCHED_FUSION, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_sched_fusion : public rtl_opt_pass
{
public:
  pass_sched_fusion (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_sched_fusion, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *)
    {
      return rest_of_handle_sched_fusion ();
    }

}; // class pass_sched_fusion

bool
pass_sched_fusion::gate (function *)
{
#ifdef INSN_SCHEDULING
  /* Scheduling fusion relies on peephole2 to do the real fusion work,
     so only enable it if peephole2 is in effect.  */
  return (optimize > 0 && flag_peephole2
          && flag_schedule_fusion && targetm.sched.fusion_priority != NULL);
#else
  return 0;
#endif
}

} // anon namespace

rtl_opt_pass *
make_pass_sched_fusion (gcc::context *ctxt)
{
  return new pass_sched_fusion (ctxt);
}