/* Instruction scheduling pass.
   Copyright (C) 1992-2018 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "cfghooks.h"
#include "df.h"
#include "profile.h"
#include "insn-attr.h"
#include "params.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "sched-int.h"


#ifdef INSN_SCHEDULING

/* The number of insns to be scheduled in total.  */
static int rgn_n_insns;

/* The number of insns scheduled so far.  */
static int sched_rgn_n_insns;

/* Set of blocks that already have their dependencies calculated.  */
static bitmap_head dont_calc_deps;

/* Last basic block in current ebb.  */
static basic_block last_bb;

/* Implementations of the sched_info functions for region scheduling.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx_insn *);
static int schedule_more_p (void);
static const char *ebb_print_insn (const rtx_insn *, int);
static int rank (rtx_insn *, rtx_insn *);
static int ebb_contributes_to_priority (rtx_insn *, rtx_insn *);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx_insn *, rtx_insn *);
static void debug_ebb_dependencies (rtx_insn *, rtx_insn *);

static void ebb_add_remove_insn (rtx_insn *, int);
static void ebb_add_block (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx_insn *);
static void ebb_fix_recovery_cfg (int, int, int);

/* Allocate memory and store the state of the frontend.  Return the allocated
   memory.  */
static void *
save_ebb_state (void)
{
  int *p = XNEW (int);
  *p = sched_rgn_n_insns;
  return p;
}

/* Restore the state of the frontend from P_, then free it.  */
static void
restore_ebb_state (void *p_)
{
  int *p = (int *)p_;
  sched_rgn_n_insns = *p;
  free (p_);
}

/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p (void)
{
  return sched_rgn_n_insns < rgn_n_insns;
}

/* Print dependency information about ebb between HEAD and TAIL.  */
static void
debug_ebb_dependencies (rtx_insn *head, rtx_insn *tail)
{
  fprintf (sched_dump,
	   ";;   --------------- forward dependences: ------------ \n");

  fprintf (sched_dump, "\n;;   --- EBB Dependences --- from bb%d to bb%d \n",
	   BLOCK_NUM (head), BLOCK_NUM (tail));

  debug_dependencies (head, tail);
}

/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */

static void
init_ready_list (void)
{
  int n = 0;
  rtx_insn *prev_head = current_sched_info->prev_head;
  rtx_insn *next_tail = current_sched_info->next_tail;
  rtx_insn *insn;

  sched_rgn_n_insns = 0;

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_ebb_dependencies (NEXT_INSN (prev_head), PREV_INSN (next_tail));

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      n++;
    }

  gcc_assert (n == rgn_n_insns);
}

/* INSN is being scheduled after LAST.  Update counters.  */
static void
begin_schedule_ready (rtx_insn *insn ATTRIBUTE_UNUSED)
{
  sched_rgn_n_insns++;
}

/* INSN is being moved to its place in the schedule, after LAST.  */
static void
begin_move_insn (rtx_insn *insn, rtx_insn *last)
{
  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      basic_block bb;

      /* An obscure special case, where we have a partially dead
	 instruction scheduled after the last control flow instruction.
	 In this case we can create a new basic block.  It is
	 always exactly one basic block, last in the sequence.  */

      e = find_fallthru_edge (last_bb->succs);

      gcc_checking_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_checking_assert (BLOCK_FOR_INSN (insn) == last_bb
			   && !IS_SPECULATION_CHECK_P (insn)
			   && BB_HEAD (last_bb) != insn
			   && BB_END (last_bb) == insn);

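      /* Sanity check what follows the jump: with a fallthru edge the
	 next insn must be a note or label (the start of the fallthru
	 block); without one it must be a barrier.  */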
      {
	rtx_insn *x = NEXT_INSN (insn);
	if (e)
	  gcc_checking_assert (NOTE_P (x) || LABEL_P (x));
	else
	  gcc_checking_assert (BARRIER_P (x));
      }

      if (e)
	{
	  bb = split_edge (e);
	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
	}
      else
	{
	  /* Create an empty unreachable block after the INSN.  */
	  rtx_insn *next = NEXT_INSN (insn);
	  if (next && BARRIER_P (next))
	    next = NEXT_INSN (next);
	  bb = create_basic_block (next, NULL_RTX, last_bb);
	}

      /* split_edge () creates BB before E->DEST.  Keep in mind that
	 this operation extends the scheduling region till the end of BB.
	 Hence, we need to shift NEXT_TAIL, so haifa-sched.c won't go out
	 of the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append new basic block to the end of the ebb.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}

/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (const rtx_insn *insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  /* '+' before insn means it is a new cycle start.  */
  if (GET_MODE (insn) == TImode)
    sprintf (tmp, "+ %4d", INSN_UID (insn));
  else
    sprintf (tmp, "  %4d", INSN_UID (insn));

  return tmp;
}

/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */

static int
rank (rtx_insn *insn1, rtx_insn *insn2)
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);

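  /* Prefer the insn from the more frequently executed block.  */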
  if (bb1->count > bb2->count)
    return -1;
  if (bb1->count < bb2->count)
    return 1;
  return 0;
}

/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
ebb_contributes_to_priority (rtx_insn *next ATTRIBUTE_UNUSED,
                             rtx_insn *insn ATTRIBUTE_UNUSED)
{
  return 1;
}

 /* INSN is a JUMP_INSN.  Store the set of registers that
    must be considered as used by this jump in USED.  */

void
ebb_compute_jump_reg_dependencies (rtx insn, regset used)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

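  /* Registers live on entry to any successor reached other than by
     falling through must be considered used by the jump; the fallthru
     successor is scheduled as part of this ebb.  */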
  FOR_EACH_EDGE (e, ei, b->succs)
    if ((e->flags & EDGE_FALLTHRU) == 0)
      bitmap_ior_into (used, df_get_live_in (e->dest));
}

/* Used in schedule_insns to initialize current_sched_info for scheduling
   regions (or single basic blocks).  */

static struct common_sched_info_def ebb_common_sched_info;

static struct sched_deps_info_def ebb_sched_deps_info =
  {
    ebb_compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL,
    1, 0, 0
  };

static struct haifa_sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,
  schedule_more_p,
  NULL,
  rank,
  ebb_print_insn,
  ebb_contributes_to_priority,
  NULL, /* insn_finishes_block_p */

  NULL, NULL,
  NULL, NULL,
  1, 0,

  ebb_add_remove_insn,
  begin_schedule_ready,
  begin_move_insn,
  advance_target_bb,

  save_ebb_state,
  restore_ebb_state,

  SCHED_EBB
  /* We can create new blocks in begin_schedule_ready ().  */
  | NEW_BBS
};

/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
   the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses list (given by LAST_BLOCK) of already processed
   blocks in EBB.  The list is formed in `add_deps_for_risky_insns'.  */

static basic_block
earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  basic_block bb, earliest_block = NULL;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx_insn *insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
	/* Found a DEF-USE dependence (insn1, load_insn).  */
	{
	  sd_iterator_def fore_sd_it;
	  dep_t fore_dep;

	  FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
	    {
	      rtx_insn *insn2 = DEP_CON (fore_dep);
	      basic_block insn2_block = BLOCK_FOR_INSN (insn2);

	      if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
		{
		  if (earliest_block != NULL
		      && earliest_block->index < insn2_block->index)
		    continue;

		  /* Found a DEF-USE dependence (insn1, insn2).  */
		  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
		    /* insn2 not guaranteed to be a 1 base reg load.  */
		    continue;

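		  /* insn2 must be in a block of the ebb that has not yet
		     been processed, i.e. not in the LAST_BLOCK chain.  */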
		  for (bb = last_block; bb; bb = (basic_block) bb->aux)
		    if (insn2_block == bb)
		      break;

		  if (!bb)
		    /* insn2 is the similar load.  */
		    earliest_block = insn2_block;
		}
	    }
	}
    }

  return earliest_block;
}

/* The following function adds dependencies between jumps and risky
   insns in given ebb.  */

static void
add_deps_for_risky_insns (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn, *prev;
  int classification;
  rtx_insn *last_jump = NULL;
  rtx_insn *next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      add_delay_dependencies (insn);
      if (control_flow_insn_p (insn))
	{
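	  /* Chain this block onto the list of blocks already seen in the
	     ebb (through bb->aux); earliest_block_with_similiar_load
	     walks this chain.  */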
	  bb = BLOCK_FOR_INSN (insn);
	  bb->aux = last_block;
	  last_block = bb;
	  /* Ensure blocks stay in the same order.  */
	  if (last_jump)
	    add_dependence (insn, last_jump, REG_DEP_ANTI);
	  last_jump = insn;
	}
      else if (INSN_P (insn) && last_jump != NULL_RTX)
	{
	  classification = haifa_classify_insn (insn);
	  prev = last_jump;

	  switch (classification)
	    {
	    case PFREE_CANDIDATE:
	      if (flag_schedule_speculative_load)
		{
		  bb = earliest_block_with_similiar_load (last_block, insn);
		  if (bb)
		    {
		      bb = (basic_block) bb->aux;
		      if (!bb)
			break;
		      prev = BB_END (bb);
		    }
		}
	      /* Fall through.  */
	    case TRAP_RISKY:
	    case IRISKY:
	    case PRISKY_CANDIDATE:
	      /* ??? We could implement better checking of PRISKY_CANDIDATEs,
		 analogous to sched-rgn.c.  */
	      /* We cannot change the mode of the backward
		 dependency because REG_DEP_ANTI has the lowest
		 rank.  */
	      if (! sched_insns_conditions_mutex_p (insn, prev))
		{
		  if ((current_sched_info->flags & DO_SPECULATION)
		      && (spec_info->mask & BEGIN_CONTROL))
		    {
		      dep_def _dep, *dep = &_dep;

		      init_dep (dep, prev, insn, REG_DEP_ANTI);

		      if (current_sched_info->flags & USE_DEPS_LIST)
			{
			  DEP_STATUS (dep) = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
							   MAX_DEP_WEAK);

			}
		      sd_add_or_update_dep (dep, false);
		    }
		  else
		    add_dependence (insn, prev, REG_DEP_CONTROL);
		}

	      break;

	    default:
	      break;
	    }
	}
    }
  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = (basic_block) last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}

/* Schedule a single extended basic block, defined by the boundaries
   HEAD and TAIL.

   We change our expectations about scheduler behavior depending on
   whether MODULO_SCHEDULING is true.  If it is, we expect that the
   caller has already called set_modulo_params and created delay pairs
   as appropriate.  If the modulo schedule failed, we return
   NULL_RTX.  */

basic_block
schedule_ebb (rtx_insn *head, rtx_insn *tail, bool modulo_scheduling)
{
  basic_block first_bb, target_bb;
  struct deps_desc tmp_deps;
  bool success;

  /* Blah.  We should fix the rest of the code not to get confused by
     a note or two.  */
  while (head != tail)
    {
      if (NOTE_P (head) || DEBUG_INSN_P (head))
	head = NEXT_INSN (head);
      else if (NOTE_P (tail) || DEBUG_INSN_P (tail))
	tail = PREV_INSN (tail);
      else if (LABEL_P (head))
	head = NEXT_INSN (head);
      else
	break;
    }

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute dependencies.  */
      init_deps (&tmp_deps, false);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
        targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they are always single block ebbs.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  rgn_n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

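  /* Pull the notes and the basic block notes out of the region so the
     insns can be reordered freely.  */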
  remove_notes (head, tail);

  unlink_bb_notes (first_bb, last_bb);

  target_bb = first_bb;

  /* Make ready list big enough to hold all the instructions from the ebb.  */
  sched_extend_ready_list (rgn_n_insns);
  success = schedule_block (&target_bb, NULL);
  gcc_assert (success || modulo_scheduling);

  /* Free ready list.  */
  sched_finish_ready_list ();

  /* We might pack all instructions into fewer blocks,
     so we may have made some of them empty.  Can't assert (b == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (modulo_scheduling || sched_rgn_n_insns == rgn_n_insns);

  /* Free dependencies.  */
  sched_free_deps (current_sched_info->head, current_sched_info->tail, true);

  gcc_assert (haifa_recovery_bb_ever_added_p
	      || deps_pools_are_empty_p ());

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
		  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return success ? last_bb : NULL;
}

/* Perform initializations before running schedule_ebbs or a single
   schedule_ebb.  */
void
schedule_ebbs_init (void)
{
  /* Setup infos.  */
  {
    memcpy (&ebb_common_sched_info, &haifa_common_sched_info,
	    sizeof (ebb_common_sched_info));

    ebb_common_sched_info.fix_recovery_cfg = ebb_fix_recovery_cfg;
    ebb_common_sched_info.add_block = ebb_add_block;
    ebb_common_sched_info.sched_pass_id = SCHED_EBB_PASS;

    common_sched_info = &ebb_common_sched_info;
    sched_deps_info = &ebb_sched_deps_info;
    current_sched_info = &ebb_sched_info;
  }

  haifa_sched_init ();

  compute_bb_for_insn ();

  /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers.  */
  bitmap_initialize (&dont_calc_deps, 0);
  bitmap_clear (&dont_calc_deps);
}

/* Perform cleanups after scheduling using schedule_ebbs or schedule_ebb.  */
void
schedule_ebbs_finish (void)
{
  bitmap_clear (&dont_calc_deps);

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  haifa_sched_finish ();
}

/* The main entry point in this file.  */

void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx_insn *tail;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return;

  if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
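  /* The parameter is a percentage; scale it to the REG_BR_PROB_BASE scale.  */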
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  schedule_ebbs_init ();

  /* Schedule every region in the subroutine.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *head = BB_HEAD (bb);

      if (bb->flags & BB_DISABLE_SCHEDULE)
	continue;

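      /* Extend the ebb along fallthrough edges: stop at the exit block,
	 at a block whose head is a label (a potential jump target), when
	 there is no fallthrough edge, when the fallthrough edge's
	 probability does not exceed the cutoff, or when scheduling of
	 the successor block is disabled.  */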
      for (;;)
	{
	  edge e;
	  tail = BB_END (bb);
	  if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
	      || LABEL_P (BB_HEAD (bb->next_bb)))
	    break;
	  e = find_fallthru_edge (bb->succs);
	  if (! e)
	    break;
	  if (e->probability.initialized_p ()
	      && e->probability.to_reg_br_prob_base () <= probability_cutoff)
	    break;
	  if (e->dest->flags & BB_DISABLE_SCHEDULE)
	    break;
	  bb = bb->next_bb;
	}

      bb = schedule_ebb (head, tail, false);
    }
  schedule_ebbs_finish ();
}

/* INSN has been added to/removed from current ebb.  */
static void
ebb_add_remove_insn (rtx_insn *insn ATTRIBUTE_UNUSED, int remove_p)
{
  if (!remove_p)
    rgn_n_insns++;
  else
    rgn_n_insns--;
}

/* BB was added to ebb after AFTER.  */
static void
ebb_add_block (basic_block bb, basic_block after)
{
  /* Recovery blocks are always bounded by BARRIERS,
     therefore they always form a single block EBB,
     and we can use bb->index to identify such EBBs.  */
  if (after == EXIT_BLOCK_PTR_FOR_FN (cfun))
    bitmap_set_bit (&dont_calc_deps, bb->index);
  else if (after == last_bb)
    last_bb = bb;
}

/* Return next block in ebb chain.  For parameter meaning please refer to
   sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx_insn *insn)
{
  if (insn)
    {
      if (BLOCK_FOR_INSN (insn) != bb
	  && control_flow_insn_p (insn)
	  /* We handle interblock movement of the speculation check
	     or over a speculation check in
	     haifa-sched.c: move_block_after_check ().  */
	  && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
	  && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
	{
	  /* Assert that we don't move jumps across blocks.  */
	  gcc_assert (!control_flow_insn_p (BB_END (bb))
		      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
	  return bb;
	}
      else
	return 0;
    }
  else
    /* Return next non empty block.  */
    {
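      /* A block whose last insn is still its basic block note contains
	 no real insns; keep advancing until we find one that does.  */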
      do
	{
	  gcc_assert (bb != last_bb);

	  bb = bb->next_bb;
	}
      while (bb_note (bb) == BB_END (bb));

      return bb;
    }
}

/* Fix internal data after interblock movement of jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
ebb_fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi,
		      int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (jump_bb_nexti == last_bb->index)
    last_bb = BASIC_BLOCK_FOR_FN (cfun, jump_bbi);
}

#endif /* INSN_SCHEDULING */