/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for the normal
   instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe.  Register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

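   As an illustrative sketch (not the actual code), the loop described
   above, expressed in terms of the structures defined later in this
   file, looks roughly like:

	while (ready.n_ready > 0 || q_size > 0)
	  {
	    queue_to_ready (&ready);		    Q -> R as time passes
	    ready_sort (&ready);		    order by rank_for_schedule
	    insn = ready_remove_first (&ready);     highest priority first
	    schedule_insn (insn);		    satisfy forward deps
	    advance_one_cycle ();
	  }

   The real loop in schedule_block additionally models issue_rate,
   stalls, and the DFA reservation state.
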
   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block over interblock motion, ties broken by
   4.  prefer useful over speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, ties broken by
   7.  choose the insn which has the most insns dependent on it, or
   finally
   8.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To a first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_backward_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using LOG_LINKS backward
   dependences.  LOG_LINKS are translated into INSN_DEPEND forward
   dependences for the purpose of forward list scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live register constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
#include "target.h"
#include "output.h"
#include "params.h"

#ifdef INSN_SCHEDULING

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */

static int issue_rate;

/* sched_verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N>0 and no -dSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=1: same as -dSR.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */

static int sched_verbose_param = 0;
int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dSR).  */
FILE *sched_dump = 0;

/* Highest uid before scheduling.  */
static int old_max_uid;

/* fix_sched_param() is called from toplev.c upon detection
   of the -fsched-verbose=N option.  */

void
fix_sched_param (const char *param, const char *val)
{
  if (!strcmp (param, "verbose"))
    sched_verbose_param = atoi (val);
  else
    warning (0, "fix_sched_param: unknown param: %s", param);
}

struct haifa_insn_data *h_i_d;

#define LINE_NOTE(INSN)		(h_i_d[INSN_UID (INSN)].line_note)
#define INSN_TICK(INSN)		(h_i_d[INSN_UID (INSN)].tick)
#define INTER_TICK(INSN)        (h_i_d[INSN_UID (INSN)].inter_tick)

/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)
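
/* For example, if max_insn_queue_index is 7, then INVALID_TICK is -8
   and MIN_TICK is -7, so INVALID_TICK is strictly smaller than every
   valid tick value.  */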

/* Issue points are used to distinguish between instructions in max_issue ().
   For now, all instructions are equally good.  */
#define ISSUE_POINTS(INSN) 1

/* Vector indexed by basic block number giving the starting line-number
   for each basic block.  */
static rtx *line_note_head;

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
static spec_info_t spec_info;

/* True if a recovery block was added during scheduling of the current
   block.  Used to determine whether we need to fix INSN_TICKs.  */
static bool added_recovery_block_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Pointers to GLAT data.  See init_glat for more information.  */
regset *glat_start, *glat_end;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx *bb_header = 0;

/* Number of basic_blocks.  */
static int old_last_basic_block;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;

/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) consists of the insns on the INSN_DEPEND
   lists of the unscheduled insns, i.e., those that are ready, queued,
   and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */

/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal time of instruction execution computed by genattr.c
   from the maximal time of functional unit reservations needed to get
   a result.  This is the longest time an insn may be queued.  */

static rtx *insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+(C)) & max_insn_queue_index)
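
/* For example, with max_insn_queue_index == 7 (a power of two minus one),
   the masking arithmetic wraps around the circular buffer:

	NEXT_Q (6)	     == (6 + 1) & 7 == 7
	NEXT_Q (7)	     == (7 + 1) & 7 == 0
	NEXT_Q_AFTER (6, 3)  == (6 + 3) & 7 == 1

   so the slot C cycles in the future is always a valid index.  */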

#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in
   the queue nor the ready list.
   QUEUE_READY     - INSN is in the ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */

#define QUEUE_INDEX(INSN) (h_i_d[INSN_UID (INSN)].queue_index)
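
/* QUEUE_INDEX thus traces an insn's life cycle.  A hypothetical insn
   that must wait two cycles moves through:

	QUEUE_NOWHERE
	  -> N			 (queue_insn; N == NEXT_Q_AFTER (q_ptr, 2))
	  -> QUEUE_READY	 (queue_to_ready, two cycles later)
	  -> QUEUE_NOWHERE	 (ready_remove_first)
	  -> QUEUE_SCHEDULED	 (schedule_insn)  */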

/* The following variable holds the state reflecting all current and
   future reservations of the processor units.  */
state_t curr_state;

/* The following variable is the size of the memory representing all
   current and future reservations of the processor units.  */
static size_t dfa_state_size;

/* The following array is used to find the best insn from the ready list
   when the automaton pipeline interface is used.  */
static char *ready_try;

/* Describe the ready list of the scheduler.
   VEC holds space enough for all insns in the current region.  VECLEN
   says how many exactly.
   FIRST is the index of the element with the highest priority; i.e. the
   last one in the ready list, since elements are ordered by ascending
   priority.
   N_READY determines how many insns are on the ready list.  */

struct ready_list
{
  rtx *vec;
  int veclen;
  int first;
  int n_ready;
};
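
/* For instance, with veclen == 8, first == 6 and n_ready == 3, the
   ready insns occupy vec[4], vec[5] and vec[6] in ascending priority
   order; ready_lastpos () below returns &vec[4] (the lowest priority
   insn) and ready_element (ready, 0) returns vec[6] (the highest).  */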

/* The pointer to the ready list.  */
static struct ready_list *readyp;

/* Scheduling clock.  */
static int clock_var;

/* Number of instructions in current scheduling region.  */
static int rgn_n_insns;

static int may_trap_exp (rtx, int);

/* Nonzero iff the address is composed of at most one register.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)					\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))
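
/* For example, the macro accepts addresses such as

	(reg:SI 3)
	(plus:SI (reg:SI 3) (const_int 16))
	(lo_sum:SI (reg:SI 3) (symbol_ref:SI "x"))

   but rejects (plus:SI (reg:SI 3) (reg:SI 4)), whose value depends on
   two registers.  */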

/* Returns a class that an insn with GET_DEST(insn)=x may belong to,
   as found by analyzing the insn's expression.  */

static int
may_trap_exp (rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
	return TRAP_RISKY;
      else
	return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory:  a volatile load.  */
      if (MEM_VOLATILE_P (x))
	return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
	return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
	return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
	return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    {
	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
	      insn_class = WORST_CLASS (insn_class, tmp_class);
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = 0; j < XVECLEN (x, i); j++)
		{
		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
		  insn_class = WORST_CLASS (insn_class, tmp_class);
		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
		    break;
		}
	    }
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
      return insn_class;
    }
}

/* Classifies an insn for the purpose of verifying that it can be
   moved speculatively, by examining its pattern, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
   being either PFREE or PRISKY.  */

int
haifa_classify_insn (rtx insn)
{
  rtx pat = PATTERN (insn);
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i, len = XVECLEN (pat, 0);

      for (i = len - 1; i >= 0; i--)
	{
	  code = GET_CODE (XVECEXP (pat, 0, i));
	  switch (code)
	    {
	    case CLOBBER:
	      /* Test if it is a 'store'.  */
	      tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
	      break;
	    case SET:
	      /* Test if it is a store.  */
	      tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
	      if (tmp_class == TRAP_RISKY)
		break;
	      /* Test if it is a load.  */
	      tmp_class
		= WORST_CLASS (tmp_class,
			       may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
					     0));
	      break;
	    case COND_EXEC:
	    case TRAP_IF:
	      tmp_class = TRAP_RISKY;
	      break;
	    default:
	      ;
	    }
	  insn_class = WORST_CLASS (insn_class, tmp_class);
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
    }
  else
    {
      code = GET_CODE (pat);
      switch (code)
	{
	case CLOBBER:
	  /* Test if it is a 'store'.  */
	  tmp_class = may_trap_exp (XEXP (pat, 0), 1);
	  break;
	case SET:
	  /* Test if it is a store.  */
	  tmp_class = may_trap_exp (SET_DEST (pat), 1);
	  if (tmp_class == TRAP_RISKY)
	    break;
	  /* Test if it is a load.  */
	  tmp_class =
	    WORST_CLASS (tmp_class,
			 may_trap_exp (SET_SRC (pat), 0));
	  break;
	case COND_EXEC:
	case TRAP_IF:
	  tmp_class = TRAP_RISKY;
	  break;
	default:;
	}
      insn_class = tmp_class;
    }

  return insn_class;
}
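
/* For example (illustrative patterns; the classification of the memory
   accesses assumes may_trap_p holds for them):

	(set (reg:SI 3) (const_int 0))		-> TRAP_FREE
	(set (reg:SI 3) (mem:SI (reg:SI 4)))	-> PFREE_CANDIDATE
						   (load, one base register)
	(set (mem:SI (reg:SI 4)) (reg:SI 3))	-> TRAP_RISKY (risky store)
	(trap_if (const_int 1) (const_int 0))	-> TRAP_RISKY  */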

/* Forward declarations.  */

HAIFA_INLINE static int insn_cost1 (rtx, enum reg_note, rtx, rtx);
static int priority (rtx);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx *, int);
static void queue_insn (rtx, int);
static int schedule_insn (rtx);
static int find_set_reg_weight (rtx);
static void find_insn_reg_weight (basic_block);
static void find_insn_reg_weight1 (rtx);
static void adjust_priority (rtx);
static void advance_one_cycle (void);

/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between three types of notes:

   (1) LINE_NUMBER notes, generated and used for debugging.  Here,
   before scheduling a region, a pointer to the LINE_NUMBER note is
   added to the insn following it (in save_line_notes()), and the note
   is removed (in rm_line_notes() and unlink_line_notes()).  After
   scheduling the region, this pointer is used for regeneration of
   the LINE_NUMBER note (in restore_line_notes()).

   (2) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (3) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */

static rtx unlink_other_notes (rtx, rtx);
static rtx unlink_line_notes (rtx, rtx);
static void reemit_notes (rtx);

static rtx *ready_lastpos (struct ready_list *);
static void ready_add (struct ready_list *, rtx, bool);
static void ready_sort (struct ready_list *);
static rtx ready_remove_first (struct ready_list *);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

static void debug_ready_list (struct ready_list *);

static void move_insn (rtx);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx ready_element (struct ready_list *, int);
static rtx ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx);
static int max_issue (struct ready_list *, int *, int);

static rtx choose_ready (struct ready_list *);

static void fix_inter_tick (rtx, rtx);
static int fix_tick_ready (rtx);
static void change_queue_index (rtx, int);
static void resolve_dep (rtx, rtx);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void extend_ready (int);
static void extend_global (rtx);
static void extend_all (rtx);
static void init_h_i_d (rtx);
static void generate_recovery_code (rtx);
static void process_insn_depend_be_in_spec (rtx, rtx, ds_t);
static void begin_speculative_block (rtx);
static void add_to_speculative_block (rtx);
static dw_t dep_weak (ds_t);
static edge find_fallthru_edge (basic_block);
static void init_before_recovery (void);
static basic_block create_recovery_block (void);
static void create_check_block_twin (rtx, bool);
static void fix_recovery_deps (basic_block);
static void associate_line_notes_with_blocks (basic_block);
static void change_pattern (rtx, rtx);
static int speculate_insn (rtx, ds_t, rtx *);
static void dump_new_block_header (int, basic_block, rtx, rtx);
static void restore_bb_notes (basic_block);
static void extend_bb (basic_block);
static void fix_jump_move (rtx);
static void move_block_after_check (rtx);
static void move_succs (VEC(edge,gc) **, basic_block);
static void init_glat (void);
static void init_glat1 (basic_block);
static void attach_life_info1 (basic_block);
static void free_glat (void);
static void sched_remove_insn (rtx);
static void clear_priorities (rtx);
static void add_jump_dependencies (rtx, rtx);
static void calc_priorities (rtx);
#ifdef ENABLE_CHECKING
static int has_edge_p (VEC(edge,gc) *, int);
static void check_cfg (rtx, rtx);
static void check_sched_flags (void);
#endif

#endif /* INSN_SCHEDULING */

/* Point to state used for the current scheduling pass.  */
struct sched_info *current_sched_info;

#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else

/* Working copy of frontend's sched_info variable.  */
static struct sched_info current_sched_info_var;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */

static rtx last_scheduled_insn;

/* Compute cost of executing INSN given the dependence LINK on the insn USED.
   This is the number of cycles between instruction issue and
   instruction results.  */

HAIFA_INLINE int
insn_cost (rtx insn, rtx link, rtx used)
{
  return insn_cost1 (insn, used ? REG_NOTE_KIND (link) : REG_NOTE_MAX,
		     link, used);
}

/* Compute cost of executing INSN given the dependence on the insn USED.
   If LINK is not NULL, then its REG_NOTE_KIND is used as a dependence type.
   Otherwise, dependence between INSN and USED is assumed to be of type
   DEP_TYPE.  This function was introduced as a workaround for
   targetm.adjust_cost hook.
   This is the number of cycles between instruction issue and
   instruction results.  */

HAIFA_INLINE static int
insn_cost1 (rtx insn, enum reg_note dep_type, rtx link, rtx used)
{
  int cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
	 understand.  We can't pass these directly to
	 result_ready_cost or insn_default_latency because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
	{
	  INSN_COST (insn) = 0;
	  return 0;
	}
      else
	{
	  cost = insn_default_latency (insn);
	  if (cost < 0)
	    cost = 0;

	  INSN_COST (insn) = cost;
	}
    }

  /* In this case estimate cost without caring how insn is used.  */
  if (used == 0)
    return cost;

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  */
  if (recog_memoized (used) < 0)
    cost = 0;
  else
    {
      gcc_assert (!link || dep_type == REG_NOTE_KIND (link));

      if (INSN_CODE (insn) >= 0)
	{
	  if (dep_type == REG_DEP_ANTI)
	    cost = 0;
	  else if (dep_type == REG_DEP_OUTPUT)
	    {
	      cost = (insn_default_latency (insn)
		      - insn_default_latency (used));
	      if (cost <= 0)
		cost = 1;
	    }
	  else if (bypass_p (insn))
	    cost = insn_latency (insn, used);
	}

      if (targetm.sched.adjust_cost_2)
	cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost);
      else
	{
	  gcc_assert (link);
	  if (targetm.sched.adjust_cost)
	    cost = targetm.sched.adjust_cost (used, link, insn, cost);
	}

      if (cost < 0)
	cost = 0;
    }

  return cost;
}
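
/* Illustrative use, for two hypothetical insns PROD and CONS connected
   by the dependence link LINK:

	cost = insn_cost (prod, link, cons);

   For a true dependence this yields insn_default_latency (prod) (unless
   a bypass applies), for an anti dependence 0, and for an output
   dependence the latency difference clamped to at least 1, before any
   target adjust_cost hook is applied.  */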

/* Compute the priority number for INSN.  */

static int
priority (rtx insn)
{
  rtx link;

  if (! INSN_P (insn))
    return 0;

  if (! INSN_PRIORITY_KNOWN (insn))
    {
      int this_priority = 0;

      if (INSN_DEPEND (insn) == 0)
	this_priority = insn_cost (insn, 0, 0);
      else
	{
	  rtx prev_first, twin;
	  basic_block rec;

	  /* For recovery check instructions we calculate priority slightly
	     differently from that of normal instructions.  Instead of walking
	     through the INSN_DEPEND (check) list, we walk through the
	     INSN_DEPEND list of each instruction in the corresponding
	     recovery block.  */

	  rec = RECOVERY_BLOCK (insn);
	  if (!rec || rec == EXIT_BLOCK_PTR)
	    {
	      prev_first = PREV_INSN (insn);
	      twin = insn;
	    }
	  else
	    {
	      prev_first = NEXT_INSN (BB_HEAD (rec));
	      twin = PREV_INSN (BB_END (rec));
	    }

	  do
	    {
	      for (link = INSN_DEPEND (twin); link; link = XEXP (link, 1))
		{
		  rtx next;
		  int next_priority;

		  next = XEXP (link, 0);

		  if (BLOCK_FOR_INSN (next) != rec)
		    {
		      /* Critical path is meaningful within block boundaries
			 only.  */
		      if (! (*current_sched_info->contributes_to_priority)
			  (next, insn)
			  /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
			     then speculative instructions will be less
			     likely to be scheduled.  That is because the
			     priority of their producers will increase, and,
			     thus, the producers will more likely be
			     scheduled, thus, resolving the dependence.  */
			  || ((current_sched_info->flags & DO_SPECULATION)
			      && (DEP_STATUS (link) & SPECULATIVE)
			      && !(spec_info->flags
				   & COUNT_SPEC_IN_CRITICAL_PATH)))
			continue;

		      next_priority = insn_cost1 (insn,
						  twin == insn ?
						  REG_NOTE_KIND (link) :
						  REG_DEP_ANTI,
						  twin == insn ? link : 0,
						  next) + priority (next);

		      if (next_priority > this_priority)
			this_priority = next_priority;
		    }
		}

	      twin = PREV_INSN (twin);
	    }
	  while (twin != prev_first);
	}
      INSN_PRIORITY (insn) = this_priority;
      INSN_PRIORITY_KNOWN (insn) = 1;
    }

  return INSN_PRIORITY (insn);
}
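
/* In effect, INSN_PRIORITY is the length of the longest weighted path
   from the insn to the end of its block.  For a hypothetical chain
   A -> B -> C of true dependences, each insn having a default latency
   of 1 and C having no forward dependencies:

	priority (C) == insn_cost (C, 0, 0)			    == 1
	priority (B) == insn_cost1 (B, REG_DEP_TRUE, link, C) + 1  == 2
	priority (A) == insn_cost1 (A, REG_DEP_TRUE, link, B) + 2  == 3

   so A, which starts the critical path, sorts ahead of B and C in the
   ready list.  */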

/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

#define SCHED_SORT(READY, N_READY)                                   \
do { if ((N_READY) == 2)				             \
       swap_sort (READY, N_READY);			             \
     else if ((N_READY) > 2)                                         \
         qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }  \
while (0)

/* Returns a positive value if x is preferred; returns a negative value if
   y is preferred.  Should never return 0, since qsort is not stable and
   ties would then be broken unpredictably.  */

static int
rank_for_schedule (const void *x, const void *y)
{
  rtx tmp = *(const rtx *) y;
  rtx tmp2 = *(const rtx *) x;
  rtx link;
  int tmp_class, tmp2_class, depend_count1, depend_count2;
  int val, priority_val, weight_val, info_val;

  /* The insn in a schedule group should be issued first.  */
  if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
    return SCHED_GROUP_P (tmp2) ? 1 : -1;

  /* Prefer insn with higher priority.  */
  priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);

  if (priority_val)
    return priority_val;

  /* Prefer a speculative insn with greater dependencies weakness.  */
  if (spec_info)
    {
      ds_t ds1, ds2;
      dw_t dw1, dw2;
      int dw;

      ds1 = TODO_SPEC (tmp) & SPECULATIVE;
      if (ds1)
	dw1 = dep_weak (ds1);
      else
	dw1 = NO_DEP_WEAK;

      ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
      if (ds2)
	dw2 = dep_weak (ds2);
      else
	dw2 = NO_DEP_WEAK;

      dw = dw2 - dw1;
      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
	return dw;
    }

  /* Prefer an insn with a smaller contribution to register pressure.  */
  if (!reload_completed &&
      (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
    return weight_val;

  info_val = (*current_sched_info->rank) (tmp, tmp2);
  if (info_val)
    return info_val;

  /* Compare insns based on their relation to the last-scheduled-insn.  */
  if (INSN_P (last_scheduled_insn))
    {
      /* Classify the instructions into three classes:
         1) Data dependent on last scheduled insn.
         2) Anti/Output dependent on last scheduled insn.
         3) Independent of last scheduled insn, or has latency of one.
         Choose the insn from the highest numbered class if different.  */
      link = find_insn_list (tmp, INSN_DEPEND (last_scheduled_insn));
      if (link == 0 || insn_cost (last_scheduled_insn, link, tmp) == 1)
	tmp_class = 3;
      else if (REG_NOTE_KIND (link) == 0)	/* Data dependence.  */
	tmp_class = 1;
      else
	tmp_class = 2;

      link = find_insn_list (tmp2, INSN_DEPEND (last_scheduled_insn));
      if (link == 0 || insn_cost (last_scheduled_insn, link, tmp2) == 1)
	tmp2_class = 3;
      else if (REG_NOTE_KIND (link) == 0)	/* Data dependence.  */
	tmp2_class = 1;
      else
	tmp2_class = 2;

      if ((val = tmp2_class - tmp_class))
	return val;
    }

  /* Prefer the insn which has more later insns that depend on it.
     This gives the scheduler more freedom when scheduling later
     instructions at the expense of added register pressure.  */
  depend_count1 = 0;
  for (link = INSN_DEPEND (tmp); link; link = XEXP (link, 1))
    depend_count1++;

  depend_count2 = 0;
  for (link = INSN_DEPEND (tmp2); link; link = XEXP (link, 1))
    depend_count2++;

  val = depend_count2 - depend_count1;
  if (val)
    return val;

  /* If insns are equally good, sort by INSN_LUID (original insn order),
     so that we make the sort stable.  This minimizes instruction movement,
     thus minimizing sched's effect on debugging and cross-jumping.  */
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
}

/* Re-sort the array A, in which only the last element (at index N - 1)
   may be out of order.  */

HAIFA_INLINE static void
swap_sort (rtx *a, int n)
{
  rtx insn = a[n - 1];
  int i = n - 2;

  while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
    {
      a[i + 1] = a[i];
      i -= 1;
    }
  a[i + 1] = insn;
}

/* Add INSN to the insn queue so that it can be executed at least
   N_CYCLES after the currently executing insn.  Preserve the insn
   chain for debugging purposes.  */

HAIFA_INLINE static void
queue_insn (rtx insn, int n_cycles)
{
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
  rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);

  gcc_assert (n_cycles <= max_insn_queue_index);

  insn_queue[next_q] = link;
  q_size += 1;

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
	       (*current_sched_info->print_insn) (insn, 0));

      fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
    }

  QUEUE_INDEX (insn) = next_q;
}

/* Remove INSN from the queue.  */
static void
queue_remove (rtx insn)
{
  gcc_assert (QUEUE_INDEX (insn) >= 0);
  remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
  q_size--;
  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
}

/* Return a pointer to the bottom of the ready list, i.e. the insn
   with the lowest priority.  */

HAIFA_INLINE static rtx *
ready_lastpos (struct ready_list *ready)
{
  gcc_assert (ready->n_ready >= 1);
  return ready->vec + ready->first - ready->n_ready + 1;
}

/* Add an element INSN to the ready list so that it ends up with the
   lowest/highest priority depending on FIRST_P.  */

HAIFA_INLINE static void
ready_add (struct ready_list *ready, rtx insn, bool first_p)
{
  if (!first_p)
    {
      if (ready->first == ready->n_ready)
	{
	  memmove (ready->vec + ready->veclen - ready->n_ready,
		   ready_lastpos (ready),
		   ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 1;
	}
      ready->vec[ready->first - ready->n_ready] = insn;
    }
  else
    {
      if (ready->first == ready->veclen - 1)
	{
	  if (ready->n_ready)
	    /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
	    memmove (ready->vec + ready->veclen - ready->n_ready - 1,
		     ready_lastpos (ready),
		     ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 2;
	}
      ready->vec[++(ready->first)] = insn;
    }

  ready->n_ready++;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
  QUEUE_INDEX (insn) = QUEUE_READY;
}

/* Remove the element with the highest priority from the ready list and
   return it.  */

HAIFA_INLINE static rtx
ready_remove_first (struct ready_list *ready)
{
  rtx t;

  gcc_assert (ready->n_ready);
  t = ready->vec[ready->first--];
  ready->n_ready--;
  /* If the queue becomes empty, reset it.  */
  if (ready->n_ready == 0)
    ready->first = ready->veclen - 1;

  gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
  QUEUE_INDEX (t) = QUEUE_NOWHERE;

  return t;
}

/* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we try to choose the ready insn that
   permits starting the maximum number of insns on the same cycle.  */

/* Return the element at position INDEX from the ready list.  INDEX 0
   is the insn with the highest priority, and N_READY - 1 the one with
   the lowest.  */

HAIFA_INLINE static rtx
ready_element (struct ready_list *ready, int index)
{
  gcc_assert (ready->n_ready && index < ready->n_ready);

  return ready->vec[ready->first - index];
}

/* Remove the element INDEX from the ready list and return it.  INDEX
   for insn with the highest priority is 0, and the lowest priority
   has N_READY - 1.  */

HAIFA_INLINE static rtx
ready_remove (struct ready_list *ready, int index)
{
  rtx t;
  int i;

  if (index == 0)
    return ready_remove_first (ready);
  gcc_assert (ready->n_ready && index < ready->n_ready);
  t = ready->vec[ready->first - index];
  ready->n_ready--;
  for (i = index; i < ready->n_ready; i++)
    ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
  QUEUE_INDEX (t) = QUEUE_NOWHERE;
  return t;
}

/* Remove INSN from the ready list.  */
static void
ready_remove_insn (rtx insn)
{
  int i;

  for (i = 0; i < readyp->n_ready; i++)
    if (ready_element (readyp, i) == insn)
      {
        ready_remove (readyp, i);
        return;
      }
  gcc_unreachable ();
}
1087*404b540aSrobert 
1088*404b540aSrobert /* Sort the ready list READY by ascending priority, using the SCHED_SORT
1089*404b540aSrobert    macro.  */
1090*404b540aSrobert 
1091*404b540aSrobert HAIFA_INLINE static void
ready_sort(struct ready_list * ready)1092*404b540aSrobert ready_sort (struct ready_list *ready)
1093*404b540aSrobert {
1094*404b540aSrobert   rtx *first = ready_lastpos (ready);
1095*404b540aSrobert   SCHED_SORT (first, ready->n_ready);
1096*404b540aSrobert }
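
/* SCHED_SORT itself lives in sched-int.h.  Roughly (this is a sketch,
   not a verbatim copy of the macro), it special-cases tiny lists and
   falls back to qsort with rank_for_schedule for larger ones:

     #define SCHED_SORT(READY, N_READY)              \
       do { if ((N_READY) == 2)                      \
              swap_sort (READY, N_READY);            \
            else if ((N_READY) > 2)                  \
              qsort (READY, N_READY, sizeof (rtx),   \
                     rank_for_schedule); }           \
       while (0)

   Because the comparison sorts by ascending priority, the best insn
   ends up in the last array slot, i.e. at VEC[FIRST], which is the
   slot ready_remove_first pops.  */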

/* PREV is an insn that is ready to execute.  Adjust its priority if that
   will help shorten or lengthen register lifetimes as appropriate.  Also
   provide a hook for the target to tweak itself.  */

HAIFA_INLINE static void
adjust_priority (rtx prev)
{
  /* ??? There used to be code here to try and estimate how an insn
     affected register lifetimes, but it did it by looking at REG_DEAD
     notes, which we removed in schedule_region.  Nor did it try to
     take into account register pressure or anything useful like that.

     Revisit when we have a machine model to work with and not before.  */

  if (targetm.sched.adjust_priority)
    INSN_PRIORITY (prev) =
      targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
}
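
/* A target that wants to bias the ready list can provide the
   TARGET_SCHED_ADJUST_PRIORITY hook called above.  The fragment below
   is a hypothetical backend sketch, guarded out of compilation; the
   function name and the "+ 2" boost are invented for illustration.  */
#if 0
/* Sketch: boost the priority of simple loads so they are issued as
   early as possible.  */
static int
example_sched_adjust_priority (rtx insn, int priority)
{
  if (INSN_P (insn)
      && GET_CODE (PATTERN (insn)) == SET
      && MEM_P (SET_SRC (PATTERN (insn))))
    priority += 2;
  return priority;
}
#endif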

/* Advance time by one cycle.  */
HAIFA_INLINE static void
advance_one_cycle (void)
{
  if (targetm.sched.dfa_pre_cycle_insn)
    state_transition (curr_state,
		      targetm.sched.dfa_pre_cycle_insn ());

  state_transition (curr_state, NULL);

  if (targetm.sched.dfa_post_cycle_insn)
    state_transition (curr_state,
		      targetm.sched.dfa_post_cycle_insn ());
}
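
/* The DFA protocol used above (and throughout this file, see e.g.
   early_queue_to_ready and max_issue): state_transition (STATE, INSN)
   returns a negative value if INSN can issue in STATE (updating
   STATE), a non-negative stall count otherwise, and passing NULL
   advances STATE by one cycle.  A minimal sketch of a query built on
   that protocol, guarded out of compilation:  */
#if 0
/* Sketch: test whether INSN could issue in STATE without modifying
   STATE itself.  */
static bool
can_issue_now_p (state_t state, rtx insn)
{
  state_t temp = alloca (dfa_state_size);

  memcpy (temp, state, dfa_state_size);
  return state_transition (temp, insn) < 0;
}
#endif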

/* Clock at which the previous instruction was issued.  */
static int last_clock_var;

/* INSN is the "currently executing insn".  Launch each insn which was
   waiting on INSN.  The function returns the necessary cycle advance
   after issuing the insn (it is not zero for insns in a schedule
   group).  */

static int
schedule_insn (rtx insn)
{
  rtx link;
  int advance = 0;

  if (sched_verbose >= 1)
    {
      char buf[2048];

      print_insn (buf, insn, 0);
      buf[40] = 0;
      fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);

      if (recog_memoized (insn) < 0)
	fprintf (sched_dump, "nothing");
      else
	print_reservation (sched_dump, insn);
      fputc ('\n', sched_dump);
    }

  /* The instruction being scheduled should have all its dependencies
     resolved and should have been removed from the ready list.  */
  gcc_assert (INSN_DEP_COUNT (insn) == 0);
  gcc_assert (!LOG_LINKS (insn));
  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);

  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  /* Now we can free the RESOLVED_DEPS list.  */
  if (current_sched_info->flags & USE_DEPS_LIST)
    free_DEPS_LIST_list (&RESOLVED_DEPS (insn));
  else
    free_INSN_LIST_list (&RESOLVED_DEPS (insn));

  gcc_assert (INSN_TICK (insn) >= MIN_TICK);
  if (INSN_TICK (insn) > clock_var)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if the following flag is set.  */
    gcc_assert (flag_sched_stalled_insns);

  /* ??? Probably, if INSN is scheduled prematurely, we should leave
     INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
  INSN_TICK (insn) = clock_var;

  /* Update dependent instructions.  */
  for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
    {
      rtx next = XEXP (link, 0);

      resolve_dep (next, insn);

      if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
	{
	  int effective_cost;

	  effective_cost = try_ready (next);

	  if (effective_cost >= 0
	      && SCHED_GROUP_P (next)
	      && advance < effective_cost)
	    advance = effective_cost;
	}
      else
	/* A check always has only one forward dependence (to the first insn
	   in the recovery block), therefore, this will be executed only
	   once.  */
	{
	  gcc_assert (XEXP (link, 1) == 0);
	  fix_recovery_deps (RECOVERY_BLOCK (insn));
	}
    }

  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (issue_rate > 1
      && GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER)
    {
      if (reload_completed)
	PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
      last_clock_var = clock_var;
    }

  return advance;
}
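
/* The TImode/VOIDmode annotation written above is meant to be read
   back after scheduling, e.g. by target-specific reorg or alignment
   code.  A hypothetical consumer sketch (guarded out of compilation;
   the function name is invented for illustration):  */
#if 0
/* Sketch: after scheduling, GET_MODE (INSN) == TImode marks an insn
   expected to start a new issue group, which a target could use to
   decide where to insert alignment.  */
static bool
starts_new_group_p (rtx insn)
{
  return INSN_P (insn) && GET_MODE (insn) == TImode;
}
#endif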

/* Functions for handling of notes.  */

/* Delete notes beginning with INSN and put them in the chain
   of notes ended by NOTE_LIST.
   Returns the insn following the notes.  */

static rtx
unlink_other_notes (rtx insn, rtx tail)
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && NOTE_NOT_BB_P (insn))
    {
      rtx next = NEXT_INSN (insn);
      basic_block bb = BLOCK_FOR_INSN (insn);

      /* Delete the note from its current position.  */
      if (prev)
	NEXT_INSN (prev) = next;
      if (next)
	PREV_INSN (next) = prev;

      if (bb)
        {
          /* A basic block can begin with either a LABEL or
             NOTE_INSN_BASIC_BLOCK.  */
          gcc_assert (BB_HEAD (bb) != insn);

          /* Check if we are removing the last insn in the BB.  */
          if (BB_END (bb) == insn)
            BB_END (bb) = prev;
        }

      /* See sched_analyze to see how these are handled.  */
      if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
	  && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
	{
	  /* Insert the note at the end of the notes list.  */
	  PREV_INSN (insn) = note_list;
	  if (note_list)
	    NEXT_INSN (note_list) = insn;
	  note_list = insn;
	}

      insn = next;
    }
  return insn;
}

/* Delete line notes beginning with INSN.  Record line-number notes so
   they can be reused.  Returns the insn following the notes.  */

static rtx
unlink_line_notes (rtx insn, rtx tail)
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && NOTE_NOT_BB_P (insn))
    {
      rtx next = NEXT_INSN (insn);

      if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
	{
          basic_block bb = BLOCK_FOR_INSN (insn);

	  /* Delete the note from its current position.  */
	  if (prev)
	    NEXT_INSN (prev) = next;
	  if (next)
	    PREV_INSN (next) = prev;

          if (bb)
            {
              /* A basic block can begin with either a LABEL or
                 NOTE_INSN_BASIC_BLOCK.  */
              gcc_assert (BB_HEAD (bb) != insn);

              /* Check if we are removing the last insn in the BB.  */
              if (BB_END (bb) == insn)
                BB_END (bb) = prev;
            }

	  /* Record line-number notes so they can be reused.  */
	  LINE_NOTE (insn) = insn;
	}
      else
	prev = insn;

      insn = next;
    }
  return insn;
}

/* Return the head and tail pointers of the ebb starting at BEG and
   ending at END.  */

void
get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
{
  rtx beg_head = BB_HEAD (beg);
  rtx beg_tail = BB_END (beg);
  rtx end_head = BB_HEAD (end);
  rtx end_tail = BB_END (end);

  /* Don't include any notes or labels at the beginning of the BEG
     basic block, or notes at the end of the END basic block.  */

  if (LABEL_P (beg_head))
    beg_head = NEXT_INSN (beg_head);

  while (beg_head != beg_tail)
    if (NOTE_P (beg_head))
      beg_head = NEXT_INSN (beg_head);
    else
      break;

  *headp = beg_head;

  if (beg == end)
    end_head = beg_head;
  else if (LABEL_P (end_head))
    end_head = NEXT_INSN (end_head);

  while (end_head != end_tail)
    if (NOTE_P (end_tail))
      end_tail = PREV_INSN (end_tail);
    else
      break;

  *tailp = end_tail;
}
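
/* The usual idiom for consuming the HEAD/TAIL pair returned above is
   to walk up to NEXT_INSN (TAIL) as a sentinel (compare
   find_insn_reg_weight below).  A minimal usage sketch, guarded out
   of compilation:  */
#if 0
/* Sketch: visit every real insn of a single block.  */
static void
walk_block_insns (basic_block bb)
{
  rtx head, tail, insn;

  get_ebb_head_tail (bb, bb, &head, &tail);
  for (insn = head; insn != NEXT_INSN (tail); insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
	/* ... process INSN ...  */
      }
}
#endif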

/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */

int
no_real_insns_p (rtx head, rtx tail)
{
  while (head != NEXT_INSN (tail))
    {
      if (!NOTE_P (head) && !LABEL_P (head))
	return 0;
      head = NEXT_INSN (head);
    }
  return 1;
}

/* Delete line notes from one block.  Save them so they can be later
   restored (in restore_line_notes).  HEAD and TAIL are the boundaries
   of the block in which notes should be processed.  */

void
rm_line_notes (rtx head, rtx tail)
{
  rtx next_tail;
  rtx insn;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
         This is needed to keep the debugger from
         getting completely deranged.  */
      if (NOTE_NOT_BB_P (insn))
	{
	  prev = insn;
	  insn = unlink_line_notes (insn, next_tail);

	  gcc_assert (prev != tail && prev != head && insn != next_tail);
	}
    }
}

/* Save line number notes for each insn in block B.  HEAD and TAIL are
   the boundaries of the block in which notes should be processed.  */

void
save_line_notes (int b, rtx head, rtx tail)
{
  rtx next_tail;

  /* We must use the true line number for the first insn in the block
     that was computed and saved at the start of this pass.  We can't
     use the current line number, because scheduling of the previous
     block may have changed the current line number.  */

  rtx line = line_note_head[b];
  rtx insn;

  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      line = insn;
    else
      LINE_NOTE (insn) = line;
}

/* After a block was scheduled, insert line notes into the insns list.
   HEAD and TAIL are the boundaries of the block in which notes should
   be processed.  */

void
restore_line_notes (rtx head, rtx tail)
{
  rtx line, note, prev, new;
  int added_notes = 0;
  rtx next_tail, insn;

  head = head;		/* Silence a warning about the otherwise unused
			   parameter.  */
  next_tail = NEXT_INSN (tail);

  /* Determine the current line-number.  We want to know the current
     line number of the first insn of the block here, in case it is
     different from the true line number that was saved earlier.  If
     different, then we need a line number note before the first insn
     of this block.  If it happens to be the same, then we don't want to
     emit another line number note here.  */
  for (line = head; line; line = PREV_INSN (line))
    if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
      break;

  /* Walk the insns keeping track of the current line-number and inserting
     the line-number notes as needed.  */
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      line = insn;
  /* This used to emit line number notes before every non-deleted note.
     However, this confuses a debugger, because line notes not separated
     by real instructions all end up at the same address.  I can find no
     use for line number notes before other notes, so none are emitted.  */
    else if (!NOTE_P (insn)
	     && INSN_UID (insn) < old_max_uid
	     && (note = LINE_NOTE (insn)) != 0
	     && note != line
	     && (line == 0
#ifdef USE_MAPPED_LOCATION
		 || NOTE_SOURCE_LOCATION (note) != NOTE_SOURCE_LOCATION (line)
#else
		 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
		 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)
#endif
		 ))
      {
	line = note;
	prev = PREV_INSN (insn);
	if (LINE_NOTE (note))
	  {
	    /* Re-use the original line-number note.  */
	    LINE_NOTE (note) = 0;
	    PREV_INSN (note) = prev;
	    NEXT_INSN (prev) = note;
	    PREV_INSN (insn) = note;
	    NEXT_INSN (note) = insn;
	    set_block_for_insn (note, BLOCK_FOR_INSN (insn));
	  }
	else
	  {
	    added_notes++;
	    new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
#ifndef USE_MAPPED_LOCATION
	    NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
#endif
	  }
      }
  if (sched_verbose && added_notes)
    fprintf (sched_dump, ";; added %d line-number notes\n", added_notes);
}

/* After scheduling the function, delete redundant line notes from the
   insns list.  */

void
rm_redundant_line_notes (void)
{
  rtx line = 0;
  rtx insn = get_insns ();
  int active_insn = 0;
  int notes = 0;

  /* Walk the insns deleting redundant line-number notes.  Many of these
     are already present.  The remainder tend to occur at basic
     block boundaries.  */
  for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      {
	/* If there are no active insns following, INSN is redundant.  */
	if (active_insn == 0)
	  {
	    notes++;
	    SET_INSN_DELETED (insn);
	  }
	/* If the line number is unchanged, LINE is redundant.  */
	else if (line
#ifdef USE_MAPPED_LOCATION
		 && NOTE_SOURCE_LOCATION (line) == NOTE_SOURCE_LOCATION (insn)
#else
		 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
		 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn)
#endif
		 )
	  {
	    notes++;
	    SET_INSN_DELETED (line);
	    line = insn;
	  }
	else
	  line = insn;
	active_insn = 0;
      }
    else if (!((NOTE_P (insn)
		&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
	       || (NONJUMP_INSN_P (insn)
		   && (GET_CODE (PATTERN (insn)) == USE
		       || GET_CODE (PATTERN (insn)) == CLOBBER))))
      active_insn++;

  if (sched_verbose && notes)
    fprintf (sched_dump, ";; deleted %d line-number notes\n", notes);
}

/* Delete notes between HEAD and TAIL and put them in the chain
   of notes ended by NOTE_LIST.  */

void
rm_other_notes (rtx head, rtx tail)
{
  rtx next_tail;
  rtx insn;

  note_list = 0;
  if (head == tail && (! INSN_P (head)))
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
         This is needed to keep the debugger from
         getting completely deranged.  */
      if (NOTE_NOT_BB_P (insn))
	{
	  prev = insn;

	  insn = unlink_other_notes (insn, next_tail);

	  gcc_assert (prev != tail && prev != head && insn != next_tail);
	}
    }
}

/* Functions for computation of registers live/usage info.  */

/* This function looks for a new register being defined.
   If the destination register is already used by the source,
   a new register is not needed.  */

static int
find_set_reg_weight (rtx x)
{
  if (GET_CODE (x) == CLOBBER
      && register_operand (SET_DEST (x), VOIDmode))
    return 1;
  if (GET_CODE (x) == SET
      && register_operand (SET_DEST (x), VOIDmode))
    {
      if (REG_P (SET_DEST (x)))
	{
	  if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
	    return 1;
	  else
	    return 0;
	}
      return 1;
    }
  return 0;
}
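
/* Two worked examples of the weight computed above (illustrative
   pseudo-registers):

     (set (reg 100) (reg 101))                   -> 1  (new value born)
     (set (reg 100) (plus (reg 100) (reg 101)))  -> 0  (dest reused by src)

   A CLOBBER of a register also counts as 1, since it makes the
   register live without consuming an input.  */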

/* Calculate INSN_REG_WEIGHT for all insns of a block.  */

static void
find_insn_reg_weight (basic_block bb)
{
  rtx insn, next_tail, head, tail;

  get_ebb_head_tail (bb, bb, &head, &tail);
  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    find_insn_reg_weight1 (insn);
}

/* Calculate INSN_REG_WEIGHT for a single instruction.
   Separated from find_insn_reg_weight because of the need
   to initialize new instructions in generate_recovery_code.  */
static void
find_insn_reg_weight1 (rtx insn)
{
  int reg_weight = 0;
  rtx x;

  /* Handle register life information.  */
  if (! INSN_P (insn))
    return;

  /* Increment weight for each register born here.  */
  x = PATTERN (insn);
  reg_weight += find_set_reg_weight (x);
  if (GET_CODE (x) == PARALLEL)
    {
      int j;
      for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
	{
	  x = XVECEXP (PATTERN (insn), 0, j);
	  reg_weight += find_set_reg_weight (x);
	}
    }
  /* Decrement weight for each register that dies here.  */
  for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
    {
      if (REG_NOTE_KIND (x) == REG_DEAD
	  || REG_NOTE_KIND (x) == REG_UNUSED)
	reg_weight--;
    }

  INSN_REG_WEIGHT (insn) = reg_weight;
}

/* Move insns that became ready to fire from the queue to the ready list.  */

static void
queue_to_ready (struct ready_list *ready)
{
  rtx insn;
  rtx link;

  q_ptr = NEXT_Q (q_ptr);

  /* Add all pending insns that can be scheduled without stalls to the
     ready list.  */
  for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
    {
      insn = XEXP (link, 0);
      q_size -= 1;

      if (sched_verbose >= 2)
	fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
		 (*current_sched_info->print_insn) (insn, 0));

      /* If the ready list is full, delay the insn for 1 cycle.
	 See the comment in schedule_block for the rationale.  */
      if (!reload_completed
	  && ready->n_ready > MAX_SCHED_READY_INSNS
	  && !SCHED_GROUP_P (insn))
	{
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, "requeued because ready full\n");
	  queue_insn (insn, 1);
	}
      else
	{
	  ready_add (ready, insn, false);
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, "moving to ready without stalls\n");
        }
    }
  free_INSN_LIST_list (&insn_queue[q_ptr]);

  /* If there are no ready insns, stall until one is ready and add all
     of the pending insns at that point to the ready list.  */
  if (ready->n_ready == 0)
    {
      int stalls;

      for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
	{
	  if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
	    {
	      for (; link; link = XEXP (link, 1))
		{
		  insn = XEXP (link, 0);
		  q_size -= 1;

		  if (sched_verbose >= 2)
		    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
			     (*current_sched_info->print_insn) (insn, 0));

		  ready_add (ready, insn, false);
		  if (sched_verbose >= 2)
		    fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
		}
	      free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);

	      advance_one_cycle ();

	      break;
	    }

	  advance_one_cycle ();
	}

      q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
      clock_var += stalls;
    }
}
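
/* The insn queue drained above is a ring of INSN_LIST chains indexed
   by cycle.  Roughly (sketch; see the NEXT_Q/NEXT_Q_AFTER definitions
   earlier in this file), queueing an insn N cycles into the future
   amounts to:

     insn_queue[NEXT_Q_AFTER (q_ptr, N)]
       = alloc_INSN_LIST (insn, insn_queue[NEXT_Q_AFTER (q_ptr, N)]);
     q_size++;

   which is what queue_insn does; queue_to_ready then drains the slot
   for the current cycle each time Q_PTR advances.  */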

/* Used by early_queue_to_ready.  Determines whether it is "ok" to
   prematurely move INSN from the queue to the ready list.  Currently,
   if a target defines the hook 'is_costly_dependence', this function
   uses the hook to check whether there exist any dependences which are
   considered costly by the target, between INSN and other insns that
   have already been scheduled.  Dependences are checked up to Y cycles
   back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
   controlling this value.
   (Other considerations could be taken into account instead (or in
   addition) depending on user flags and target hooks.)  */

static bool
ok_for_early_queue_removal (rtx insn)
{
  int n_cycles;
  rtx prev_insn = last_scheduled_insn;

  if (targetm.sched.is_costly_dependence)
    {
      for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
	{
	  for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
	    {
	      rtx dep_link = 0;
	      int dep_cost;

	      if (!NOTE_P (prev_insn))
		{
		  dep_link = find_insn_list (insn, INSN_DEPEND (prev_insn));
		  if (dep_link)
		    {
		      dep_cost = insn_cost (prev_insn, dep_link, insn);
		      if (targetm.sched.is_costly_dependence (prev_insn, insn,
				dep_link, dep_cost,
				flag_sched_stalled_insns_dep - n_cycles))
			return false;
		    }
		}

	      if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
		break;
	    }

	  if (!prev_insn)
	    break;
	  prev_insn = PREV_INSN (prev_insn);
	}
    }

  return true;
}
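
/* The 'is_costly_dependence' hook consulted above receives the two
   insns, the dependence link, its cost, and the distance in cycles.
   The fragment below is a hypothetical target sketch, guarded out of
   compilation; the function name and the thresholds are invented for
   illustration.  */
#if 0
/* Sketch: treat a true dependence costing more than one cycle as
   costly when it is fewer than two cycles away.  */
static bool
example_is_costly_dependence (rtx insn ATTRIBUTE_UNUSED,
			      rtx next ATTRIBUTE_UNUSED,
			      rtx link, int cost, int distance)
{
  return (REG_NOTE_KIND (link) == REG_DEP_TRUE
	  && cost > 1
	  && distance < 2);
}
#endif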


/* Remove insns from the queue, before they become "ready" with respect
   to FU latency considerations.  */

static int
early_queue_to_ready (state_t state, struct ready_list *ready)
{
  rtx insn;
  rtx link;
  rtx next_link;
  rtx prev_link;
  bool move_to_ready;
  int cost;
  state_t temp_state = alloca (dfa_state_size);
  int stalls;
  int insns_removed = 0;

  /*
     Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
     function:

     X == 0: There is no limit on how many queued insns can be removed
             prematurely.  (flag_sched_stalled_insns = -1).

     X >= 1: Only X queued insns can be removed prematurely in each
	     invocation.  (flag_sched_stalled_insns = X).

     Otherwise: Early queue removal is disabled.
         (flag_sched_stalled_insns = 0)
  */

  if (! flag_sched_stalled_insns)
    return 0;

  for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
    {
      if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
	{
	  if (sched_verbose > 6)
	    fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);

	  prev_link = 0;
	  while (link)
	    {
	      next_link = XEXP (link, 1);
	      insn = XEXP (link, 0);
	      if (insn && sched_verbose > 6)
		print_rtl_single (sched_dump, insn);

	      memcpy (temp_state, state, dfa_state_size);
	      if (recog_memoized (insn) < 0)
		/* Use a non-negative cost to indicate that the insn is
		   not ready yet; this avoids an infinite
		   Q->R->Q->R... cycle.  */
		cost = 0;
	      else
		cost = state_transition (temp_state, insn);

	      if (sched_verbose >= 6)
		fprintf (sched_dump, "transition cost = %d\n", cost);

	      move_to_ready = false;
	      if (cost < 0)
		{
		  move_to_ready = ok_for_early_queue_removal (insn);
		  if (move_to_ready == true)
		    {
		      /* Move from Q to R.  */
		      q_size -= 1;
		      ready_add (ready, insn, false);

		      if (prev_link)
			XEXP (prev_link, 1) = next_link;
		      else
			insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;

		      free_INSN_LIST_node (link);

		      if (sched_verbose >= 2)
			fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
				 (*current_sched_info->print_insn) (insn, 0));

		      insns_removed++;
		      if (insns_removed == flag_sched_stalled_insns)
			/* Remove no more than flag_sched_stalled_insns insns
			   from Q at a time.  */
			return insns_removed;
		    }
		}

	      if (move_to_ready == false)
		prev_link = link;

	      link = next_link;
	    } /* while link */
	} /* if link */

    } /* for stalls.. */

  return insns_removed;
}


/* Print the ready list for debugging purposes.  Callable from the
   debugger.  */

static void
debug_ready_list (struct ready_list *ready)
{
  rtx *p;
  int i;

  if (ready->n_ready == 0)
    {
      fprintf (sched_dump, "\n");
      return;
    }

  p = ready_lastpos (ready);
  for (i = 0; i < ready->n_ready; i++)
    fprintf (sched_dump, "  %s", (*current_sched_info->print_insn) (p[i], 0));
  fprintf (sched_dump, "\n");
}

/* Search INSN for REG_SAVE_NOTE note pairs for
   NOTE_INSN_EH_REGION_{BEG,END} and convert them back into NOTEs.  The
   REG_SAVE_NOTE following the first one contains the saved value of
   NOTE_BLOCK_NUMBER, which is useful for NOTE_INSN_EH_REGION_{BEG,END}
   NOTEs.  */

static void
reemit_notes (rtx insn)
{
  rtx note, last = insn;

  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    {
      if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
	{
	  enum insn_note note_type = INTVAL (XEXP (note, 0));

	  last = emit_note_before (note_type, last);
	  remove_note (insn, note);
	}
    }
}

/* Move INSN.  Reemit notes if needed.  Update CFG, if needed.  */
static void
move_insn (rtx insn)
{
  rtx last = last_scheduled_insn;

  if (PREV_INSN (insn) != last)
    {
      basic_block bb;
      rtx note;
      int jump_p = 0;

      bb = BLOCK_FOR_INSN (insn);

      /* BB_HEAD is either a LABEL or a NOTE.  */
      gcc_assert (BB_HEAD (bb) != insn);

      if (BB_END (bb) == insn)
	/* If this is the last instruction in BB, move the end marker one
	   instruction up.  */
	{
	  /* Jumps are always placed at the end of a basic block.  */
	  jump_p = control_flow_insn_p (insn);

	  gcc_assert (!jump_p
		      || ((current_sched_info->flags & SCHED_RGN)
			  && IS_SPECULATION_BRANCHY_CHECK_P (insn))
		      || (current_sched_info->flags & SCHED_EBB));

	  gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);

	  BB_END (bb) = PREV_INSN (insn);
	}

      gcc_assert (BB_END (bb) != last);

      if (jump_p)
	/* We move the block note along with the jump.  */
	{
	  /* NT is needed for the assertion below.  */
	  rtx nt = current_sched_info->next_tail;

	  note = NEXT_INSN (insn);
	  while (NOTE_NOT_BB_P (note) && note != nt)
	    note = NEXT_INSN (note);

	  if (note != nt
	      && (LABEL_P (note)
		  || BARRIER_P (note)))
	    note = NEXT_INSN (note);

	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
	}
      else
	note = insn;

      NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
      PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);

      NEXT_INSN (note) = NEXT_INSN (last);
      PREV_INSN (NEXT_INSN (last)) = note;

      NEXT_INSN (last) = insn;
      PREV_INSN (insn) = last;

      bb = BLOCK_FOR_INSN (last);

      if (jump_p)
	{
	  fix_jump_move (insn);

	  if (BLOCK_FOR_INSN (insn) != bb)
	    move_block_after_check (insn);

	  gcc_assert (BB_END (bb) == last);
	}

      set_block_for_insn (insn, bb);

      /* Update BB_END, if needed.  */
      if (BB_END (bb) == last)
	BB_END (bb) = insn;
    }

  reemit_notes (insn);

  SCHED_GROUP_P (insn) = 0;
}

/* The following structure describes an entry of the stack of choices.  */
struct choice_entry
{
  /* Ordinal number of the issued insn in the ready queue.  */
  int index;
  /* The number of remaining insns whose issue we should still try.  */
  int rest;
  /* The number of issued essential insns.  */
  int n;
  /* State after issuing the insn.  */
  state_t state;
};

/* The following array is used to implement a stack of choices used in
   function max_issue.  */
static struct choice_entry *choice_stack;

/* The following variable value is the number of essential insns issued
   on the current cycle.  An insn is essential if it changes the
   processor's state.  */
static int cycle_issued_insns;

/* The following variable value is the maximal number of tries of issuing
   insns for the first cycle multipass insn scheduling.  We define
   this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
   need this constraint if all real insns (with non-negative codes)
   had reservations because in this case the algorithm complexity is
   O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
   might be incomplete and such insns might occur.  For such
   descriptions, the complexity of the algorithm (without the
   constraint) could reach DFA_LOOKAHEAD ** N, where N is the queue
   length.  */
static int max_lookahead_tries;

/* The following value is the value of hook
   `first_cycle_multipass_dfa_lookahead' at the last call of
   `max_issue'.  */
static int cached_first_cycle_multipass_dfa_lookahead = 0;

/* The following value is the value of `issue_rate' at the last call of
   `sched_init'.  */
static int cached_issue_rate = 0;

/* The following function returns the maximal (or close to maximal)
   number of insns which can be issued on the same cycle, one of which
   is the insn with the best rank (the first insn in READY).  To do
   this, the function tries different samples of ready insns.  READY
   is the current queue `ready'.  The global array READY_TRY reflects
   which insns are already issued in this try.  MAX_POINTS is the sum
   of points of all instructions in READY.  The function stops
   immediately if it reaches a solution in which all instructions can
   be issued.  *INDEX will contain the index of the best insn in
   READY.  The following function is used only for first cycle
   multipass scheduling.  */
static int
max_issue (struct ready_list *ready, int *index, int max_points)
{
  int n, i, all, n_ready, best, delay, tries_num, points = -1;
  struct choice_entry *top;
  rtx insn;

  best = 0;
  memcpy (choice_stack->state, curr_state, dfa_state_size);
  top = choice_stack;
  top->rest = cached_first_cycle_multipass_dfa_lookahead;
  top->n = 0;
  n_ready = ready->n_ready;
  for (all = i = 0; i < n_ready; i++)
    if (!ready_try [i])
      all++;
  i = 0;
  tries_num = 0;
  for (;;)
    {
      if (top->rest == 0 || i >= n_ready)
	{
	  if (top == choice_stack)
	    break;
	  if (best < top - choice_stack && ready_try [0])
	    {
	      best = top - choice_stack;
	      *index = choice_stack [1].index;
	      points = top->n;
	      if (top->n == max_points || best == all)
		break;
	    }
	  i = top->index;
	  ready_try [i] = 0;
	  top--;
	  memcpy (curr_state, top->state, dfa_state_size);
	}
      else if (!ready_try [i])
	{
	  tries_num++;
	  if (tries_num > max_lookahead_tries)
	    break;
	  insn = ready_element (ready, i);
	  delay = state_transition (curr_state, insn);
	  if (delay < 0)
	    {
	      if (state_dead_lock_p (curr_state))
		top->rest = 0;
	      else
		top->rest--;
	      n = top->n;
	      if (memcmp (top->state, curr_state, dfa_state_size) != 0)
		n += ISSUE_POINTS (insn);
	      top++;
	      top->rest = cached_first_cycle_multipass_dfa_lookahead;
	      top->index = i;
	      top->n = n;
	      memcpy (top->state, curr_state, dfa_state_size);
	      ready_try [i] = 1;
	      i = -1;
	    }
	}
      i++;
    }
  while (top != choice_stack)
    {
      ready_try [top->index] = 0;
      top--;
    }
  memcpy (curr_state, choice_stack->state, dfa_state_size);

  if (sched_verbose >= 4)
    fprintf (sched_dump, ";;\t\tChosen insn : %s; points: %d/%d\n",
	     (*current_sched_info->print_insn) (ready_element (ready, *index),
						0),
	     points, max_points);

  return best;
}
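
/* In other words, max_issue performs a depth-first search over
   subsets of the ready list, bounded both by the per-level lookahead
   budget (TOP->REST) and by MAX_LOOKAHEAD_TRIES overall.  A rough
   sketch of the recursion it implements iteratively:

     best (state, ready)
       = max over each issuable INSN in ready of
           ISSUE_POINTS (INSN) + best (state after INSN, ready - INSN)

   with the first insn of the best sequence found reported back
   through *INDEX.  */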
2156*404b540aSrobert 
2157*404b540aSrobert /* The following function chooses insn from READY and modifies
2158*404b540aSrobert    *N_READY and READY.  The following function is used only for first
2159*404b540aSrobert    cycle multipass scheduling.  */
2160*404b540aSrobert 
2161*404b540aSrobert static rtx
choose_ready(struct ready_list * ready)2162*404b540aSrobert choose_ready (struct ready_list *ready)
2163*404b540aSrobert {
2164*404b540aSrobert   int lookahead = 0;
2165*404b540aSrobert 
2166*404b540aSrobert   if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2167*404b540aSrobert     lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2168*404b540aSrobert   if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
2169*404b540aSrobert     return ready_remove_first (ready);
2170*404b540aSrobert   else
2171*404b540aSrobert     {
2172*404b540aSrobert       /* Try to choose the better insn.  */
2173*404b540aSrobert       int index = 0, i, n;
2174*404b540aSrobert       rtx insn;
2175*404b540aSrobert       int more_issue, max_points, try_data = 1, try_control = 1;
2176*404b540aSrobert 
2177*404b540aSrobert       if (cached_first_cycle_multipass_dfa_lookahead != lookahead)
2178*404b540aSrobert 	{
2179*404b540aSrobert 	  cached_first_cycle_multipass_dfa_lookahead = lookahead;
2180*404b540aSrobert 	  max_lookahead_tries = 100;
2181*404b540aSrobert 	  for (i = 0; i < issue_rate; i++)
2182*404b540aSrobert 	    max_lookahead_tries *= lookahead;
2183*404b540aSrobert 	}
2184*404b540aSrobert       insn = ready_element (ready, 0);
2185*404b540aSrobert       if (INSN_CODE (insn) < 0)
2186*404b540aSrobert 	return ready_remove_first (ready);
2187*404b540aSrobert 
2188*404b540aSrobert       if (spec_info
2189*404b540aSrobert 	  && spec_info->flags & (PREFER_NON_DATA_SPEC
2190*404b540aSrobert 				 | PREFER_NON_CONTROL_SPEC))
2191*404b540aSrobert 	{
2192*404b540aSrobert 	  for (i = 0, n = ready->n_ready; i < n; i++)
2193*404b540aSrobert 	    {
2194*404b540aSrobert 	      rtx x;
2195*404b540aSrobert 	      ds_t s;
2196*404b540aSrobert 
2197*404b540aSrobert 	      x = ready_element (ready, i);
2198*404b540aSrobert 	      s = TODO_SPEC (x);
2199*404b540aSrobert 
2200*404b540aSrobert 	      if (spec_info->flags & PREFER_NON_DATA_SPEC
2201*404b540aSrobert 		  && !(s & DATA_SPEC))
2202*404b540aSrobert 		{
2203*404b540aSrobert 		  try_data = 0;
2204*404b540aSrobert 		  if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2205*404b540aSrobert 		      || !try_control)
2206*404b540aSrobert 		    break;
2207*404b540aSrobert 		}
2208*404b540aSrobert 
2209*404b540aSrobert 	      if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2210*404b540aSrobert 		  && !(s & CONTROL_SPEC))
2211*404b540aSrobert 		{
2212*404b540aSrobert 		  try_control = 0;
2213*404b540aSrobert 		  if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2214*404b540aSrobert 		    break;
2215*404b540aSrobert 		}
2216*404b540aSrobert 	    }
2217*404b540aSrobert 	}
2218*404b540aSrobert 
2219*404b540aSrobert       if ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2220*404b540aSrobert 	  || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
2221*404b540aSrobert 	  || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2222*404b540aSrobert 	      && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2223*404b540aSrobert 	      (insn)))
2224*404b540aSrobert 	/* Discard speculative instruction that stands first in the ready
2225*404b540aSrobert 	   list.  */
2226*404b540aSrobert 	{
2227*404b540aSrobert 	  change_queue_index (insn, 1);
2228*404b540aSrobert 	  return 0;
2229*404b540aSrobert 	}
2230*404b540aSrobert 
2231*404b540aSrobert       max_points = ISSUE_POINTS (insn);
2232*404b540aSrobert       more_issue = issue_rate - cycle_issued_insns - 1;
2233*404b540aSrobert 
2234*404b540aSrobert       for (i = 1; i < ready->n_ready; i++)
2235*404b540aSrobert 	{
2236*404b540aSrobert 	  insn = ready_element (ready, i);
2237*404b540aSrobert 	  ready_try [i]
2238*404b540aSrobert 	    = (INSN_CODE (insn) < 0
2239*404b540aSrobert                || (!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2240*404b540aSrobert                || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
2241*404b540aSrobert 	       || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2242*404b540aSrobert 		   && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2243*404b540aSrobert 		   (insn)));
2244*404b540aSrobert 
2245*404b540aSrobert 	  if (!ready_try [i] && more_issue-- > 0)
2246*404b540aSrobert 	    max_points += ISSUE_POINTS (insn);
2247*404b540aSrobert 	}
2248*404b540aSrobert 
2249*404b540aSrobert       if (max_issue (ready, &index, max_points) == 0)
2250*404b540aSrobert 	return ready_remove_first (ready);
2251*404b540aSrobert       else
2252*404b540aSrobert 	return ready_remove (ready, index);
2253*404b540aSrobert     }
2254*404b540aSrobert }
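
/* Illustrative sketch (ours, not part of the original source): the
   lookahead budget computed in choose_ready multiplies 100 by LOOKAHEAD
   once per issue slot.  For instance, with issue_rate == 4 and a DFA
   lookahead of 2:

       max_lookahead_tries = 100 * 2 * 2 * 2 * 2;    i.e. 1600

   so max_issue probes at most 1600 DFA states before settling on the
   best candidate found so far.  */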
2255*404b540aSrobert 
2256*404b540aSrobert /* Use forward list scheduling to rearrange insns of block pointed to by
2257*404b540aSrobert    TARGET_BB, possibly bringing insns from subsequent blocks in the same
2258*404b540aSrobert    region.  */
2259*404b540aSrobert 
2260*404b540aSrobert void
2261*404b540aSrobert schedule_block (basic_block *target_bb, int rgn_n_insns1)
2262*404b540aSrobert {
2263*404b540aSrobert   struct ready_list ready;
2264*404b540aSrobert   int i, first_cycle_insn_p;
2265*404b540aSrobert   int can_issue_more;
2266*404b540aSrobert   state_t temp_state = NULL;  /* It is used for multipass scheduling.  */
2267*404b540aSrobert   int sort_p, advance, start_clock_var;
2268*404b540aSrobert 
2269*404b540aSrobert   /* Head/tail info for this block.  */
2270*404b540aSrobert   rtx prev_head = current_sched_info->prev_head;
2271*404b540aSrobert   rtx next_tail = current_sched_info->next_tail;
2272*404b540aSrobert   rtx head = NEXT_INSN (prev_head);
2273*404b540aSrobert   rtx tail = PREV_INSN (next_tail);
2274*404b540aSrobert 
2275*404b540aSrobert   /* We used to have code to avoid getting parameters moved from hard
2276*404b540aSrobert      argument registers into pseudos.
2277*404b540aSrobert 
2278*404b540aSrobert      However, it was removed when it proved to be of marginal benefit
2279*404b540aSrobert      and caused problems because schedule_block and compute_forward_dependences
2280*404b540aSrobert      had different notions of what the "head" insn was.  */
2281*404b540aSrobert 
2282*404b540aSrobert   gcc_assert (head != tail || INSN_P (head));
2283*404b540aSrobert 
2284*404b540aSrobert   added_recovery_block_p = false;
2285*404b540aSrobert 
2286*404b540aSrobert   /* Debug info.  */
2287*404b540aSrobert   if (sched_verbose)
2288*404b540aSrobert     dump_new_block_header (0, *target_bb, head, tail);
2289*404b540aSrobert 
2290*404b540aSrobert   state_reset (curr_state);
2291*404b540aSrobert 
2292*404b540aSrobert   /* Allocate the ready list.  */
2293*404b540aSrobert   readyp = &ready;
2294*404b540aSrobert   ready.vec = NULL;
2295*404b540aSrobert   ready_try = NULL;
2296*404b540aSrobert   choice_stack = NULL;
2297*404b540aSrobert 
2298*404b540aSrobert   rgn_n_insns = -1;
2299*404b540aSrobert   extend_ready (rgn_n_insns1 + 1);
2300*404b540aSrobert 
2301*404b540aSrobert   ready.first = ready.veclen - 1;
2302*404b540aSrobert   ready.n_ready = 0;
2303*404b540aSrobert 
2304*404b540aSrobert   /* TEMP_STATE is used for first cycle multipass scheduling.  */
2305*404b540aSrobert   temp_state = alloca (dfa_state_size);
2306*404b540aSrobert 
2307*404b540aSrobert   if (targetm.sched.md_init)
2308*404b540aSrobert     targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
2309*404b540aSrobert 
2310*404b540aSrobert   /* We start inserting insns after PREV_HEAD.  */
2311*404b540aSrobert   last_scheduled_insn = prev_head;
2312*404b540aSrobert 
2313*404b540aSrobert   gcc_assert (NOTE_P (last_scheduled_insn)
2314*404b540aSrobert 	      && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2315*404b540aSrobert 
2316*404b540aSrobert   /* Initialize INSN_QUEUE.  Q_SIZE is the total number of insns in the
2317*404b540aSrobert      queue.  */
2318*404b540aSrobert   q_ptr = 0;
2319*404b540aSrobert   q_size = 0;
2320*404b540aSrobert 
2321*404b540aSrobert   insn_queue = alloca ((max_insn_queue_index + 1) * sizeof (rtx));
2322*404b540aSrobert   memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
2323*404b540aSrobert 
2324*404b540aSrobert   /* Start just before the beginning of time.  */
2325*404b540aSrobert   clock_var = -1;
2326*404b540aSrobert 
2327*404b540aSrobert   /* The queue, the ready list and clock_var need to be initialized
2328*404b540aSrobert      before try_ready () is called (through init_ready_list ()).  */
2329*404b540aSrobert   (*current_sched_info->init_ready_list) ();
2330*404b540aSrobert 
2331*404b540aSrobert   /* The algorithm is O(n^2) in the number of ready insns at any given
2332*404b540aSrobert      time in the worst case.  Before reload we are more likely to have
2333*404b540aSrobert      big lists so truncate them to a reasonable size.  */
2334*404b540aSrobert   if (!reload_completed && ready.n_ready > MAX_SCHED_READY_INSNS)
2335*404b540aSrobert     {
2336*404b540aSrobert       ready_sort (&ready);
2337*404b540aSrobert 
2338*404b540aSrobert       /* Find first free-standing insn past MAX_SCHED_READY_INSNS.  */
2339*404b540aSrobert       for (i = MAX_SCHED_READY_INSNS; i < ready.n_ready; i++)
2340*404b540aSrobert 	if (!SCHED_GROUP_P (ready_element (&ready, i)))
2341*404b540aSrobert 	  break;
2342*404b540aSrobert 
2343*404b540aSrobert       if (sched_verbose >= 2)
2344*404b540aSrobert 	{
2345*404b540aSrobert 	  fprintf (sched_dump,
2346*404b540aSrobert 		   ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2347*404b540aSrobert 	  fprintf (sched_dump,
2348*404b540aSrobert 		   ";;\t\t before reload => truncated to %d insns\n", i);
2349*404b540aSrobert 	}
2350*404b540aSrobert 
2351*404b540aSrobert       /* Delay all insns past it for 1 cycle.  */
2352*404b540aSrobert       while (i < ready.n_ready)
2353*404b540aSrobert 	queue_insn (ready_remove (&ready, i), 1);
2354*404b540aSrobert     }
2355*404b540aSrobert 
2356*404b540aSrobert   /* Now we can restore basic block notes and maintain precise cfg.  */
2357*404b540aSrobert   restore_bb_notes (*target_bb);
2358*404b540aSrobert 
2359*404b540aSrobert   last_clock_var = -1;
2360*404b540aSrobert 
2361*404b540aSrobert   advance = 0;
2362*404b540aSrobert 
2363*404b540aSrobert   sort_p = TRUE;
2364*404b540aSrobert   /* Loop until all the insns in BB are scheduled.  */
2365*404b540aSrobert   while ((*current_sched_info->schedule_more_p) ())
2366*404b540aSrobert     {
2367*404b540aSrobert       do
2368*404b540aSrobert 	{
2369*404b540aSrobert 	  start_clock_var = clock_var;
2370*404b540aSrobert 
2371*404b540aSrobert 	  clock_var++;
2372*404b540aSrobert 
2373*404b540aSrobert 	  advance_one_cycle ();
2374*404b540aSrobert 
2375*404b540aSrobert 	  /* Add to the ready list all pending insns that can be issued now.
2376*404b540aSrobert 	     If there are no ready insns, increment clock until one
2377*404b540aSrobert 	     is ready and add all pending insns at that point to the ready
2378*404b540aSrobert 	     list.  */
2379*404b540aSrobert 	  queue_to_ready (&ready);
2380*404b540aSrobert 
2381*404b540aSrobert 	  gcc_assert (ready.n_ready);
2382*404b540aSrobert 
2383*404b540aSrobert 	  if (sched_verbose >= 2)
2384*404b540aSrobert 	    {
2385*404b540aSrobert 	      fprintf (sched_dump, ";;\t\tReady list after queue_to_ready:  ");
2386*404b540aSrobert 	      debug_ready_list (&ready);
2387*404b540aSrobert 	    }
2388*404b540aSrobert 	  advance -= clock_var - start_clock_var;
2389*404b540aSrobert 	}
2390*404b540aSrobert       while (advance > 0);
2391*404b540aSrobert 
2392*404b540aSrobert       if (sort_p)
2393*404b540aSrobert 	{
2394*404b540aSrobert 	  /* Sort the ready list based on priority.  */
2395*404b540aSrobert 	  ready_sort (&ready);
2396*404b540aSrobert 
2397*404b540aSrobert 	  if (sched_verbose >= 2)
2398*404b540aSrobert 	    {
2399*404b540aSrobert 	      fprintf (sched_dump, ";;\t\tReady list after ready_sort:  ");
2400*404b540aSrobert 	      debug_ready_list (&ready);
2401*404b540aSrobert 	    }
2402*404b540aSrobert 	}
2403*404b540aSrobert 
2404*404b540aSrobert       /* Allow the target to reorder the list, typically for
2405*404b540aSrobert 	 better instruction bundling.  */
2406*404b540aSrobert       if (sort_p && targetm.sched.reorder
2407*404b540aSrobert 	  && (ready.n_ready == 0
2408*404b540aSrobert 	      || !SCHED_GROUP_P (ready_element (&ready, 0))))
2409*404b540aSrobert 	can_issue_more =
2410*404b540aSrobert 	  targetm.sched.reorder (sched_dump, sched_verbose,
2411*404b540aSrobert 				 ready_lastpos (&ready),
2412*404b540aSrobert 				 &ready.n_ready, clock_var);
2413*404b540aSrobert       else
2414*404b540aSrobert 	can_issue_more = issue_rate;
2415*404b540aSrobert 
2416*404b540aSrobert       first_cycle_insn_p = 1;
2417*404b540aSrobert       cycle_issued_insns = 0;
2418*404b540aSrobert       for (;;)
2419*404b540aSrobert 	{
2420*404b540aSrobert 	  rtx insn;
2421*404b540aSrobert 	  int cost;
2422*404b540aSrobert 	  bool asm_p = false;
2423*404b540aSrobert 
2424*404b540aSrobert 	  if (sched_verbose >= 2)
2425*404b540aSrobert 	    {
2426*404b540aSrobert 	      fprintf (sched_dump, ";;\tReady list (t = %3d):  ",
2427*404b540aSrobert 		       clock_var);
2428*404b540aSrobert 	      debug_ready_list (&ready);
2429*404b540aSrobert 	    }
2430*404b540aSrobert 
2431*404b540aSrobert 	  if (ready.n_ready == 0
2432*404b540aSrobert 	      && can_issue_more
2433*404b540aSrobert 	      && reload_completed)
2434*404b540aSrobert 	    {
2435*404b540aSrobert 	      /* Allow scheduling insns directly from the queue in case
2436*404b540aSrobert 		 there's nothing better to do (ready list is empty) but
2437*404b540aSrobert 		 there are still vacant dispatch slots in the current cycle.  */
2438*404b540aSrobert 	      if (sched_verbose >= 6)
2439*404b540aSrobert 		fprintf (sched_dump, ";;\t\tSecond chance\n");
2440*404b540aSrobert 	      memcpy (temp_state, curr_state, dfa_state_size);
2441*404b540aSrobert 	      if (early_queue_to_ready (temp_state, &ready))
2442*404b540aSrobert 		ready_sort (&ready);
2443*404b540aSrobert 	    }
2444*404b540aSrobert 
2445*404b540aSrobert 	  if (ready.n_ready == 0 || !can_issue_more
2446*404b540aSrobert 	      || state_dead_lock_p (curr_state)
2447*404b540aSrobert 	      || !(*current_sched_info->schedule_more_p) ())
2448*404b540aSrobert 	    break;
2449*404b540aSrobert 
2450*404b540aSrobert 	  /* Select and remove the insn from the ready list.  */
2451*404b540aSrobert 	  if (sort_p)
2452*404b540aSrobert 	    {
2453*404b540aSrobert 	      insn = choose_ready (&ready);
2454*404b540aSrobert 	      if (!insn)
2455*404b540aSrobert 		continue;
2456*404b540aSrobert 	    }
2457*404b540aSrobert 	  else
2458*404b540aSrobert 	    insn = ready_remove_first (&ready);
2459*404b540aSrobert 
2460*404b540aSrobert 	  if (targetm.sched.dfa_new_cycle
2461*404b540aSrobert 	      && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
2462*404b540aSrobert 					      insn, last_clock_var,
2463*404b540aSrobert 					      clock_var, &sort_p))
2464*404b540aSrobert 	    /* SORT_P is used by the target to override sorting
2465*404b540aSrobert 	       of the ready list.  This is needed when the target
2466*404b540aSrobert 	       has modified its internal structures expecting that
2467*404b540aSrobert 	       the insn will be issued next.  As we need the insn
2468*404b540aSrobert 	       to have the highest priority (so it will be returned by
2469*404b540aSrobert 	       the ready_remove_first call above), we invoke
2470*404b540aSrobert 	       ready_add (&ready, insn, true).
2471*404b540aSrobert 	       But, still, there is one issue: INSN can be later
2472*404b540aSrobert 	       discarded by scheduler's front end through
2473*404b540aSrobert 	       current_sched_info->can_schedule_ready_p, hence, won't
2474*404b540aSrobert 	       be issued next.  */
2475*404b540aSrobert 	    {
2476*404b540aSrobert 	      ready_add (&ready, insn, true);
2477*404b540aSrobert               break;
2478*404b540aSrobert 	    }
2479*404b540aSrobert 
2480*404b540aSrobert 	  sort_p = TRUE;
2481*404b540aSrobert 	  memcpy (temp_state, curr_state, dfa_state_size);
2482*404b540aSrobert 	  if (recog_memoized (insn) < 0)
2483*404b540aSrobert 	    {
2484*404b540aSrobert 	      asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
2485*404b540aSrobert 		       || asm_noperands (PATTERN (insn)) >= 0);
2486*404b540aSrobert 	      if (!first_cycle_insn_p && asm_p)
2487*404b540aSrobert 		/* This is an asm insn that we tried to issue on a cycle
2488*404b540aSrobert 		   other than the first.  Issue it on the next cycle.  */
2489*404b540aSrobert 		cost = 1;
2490*404b540aSrobert 	      else
2491*404b540aSrobert 		/* A USE insn, or something else we don't need to
2492*404b540aSrobert 		   understand.  We can't pass these directly to
2493*404b540aSrobert 		   state_transition because it will trigger a
2494*404b540aSrobert 		   fatal error for unrecognizable insns.  */
2495*404b540aSrobert 		cost = 0;
2496*404b540aSrobert 	    }
2497*404b540aSrobert 	  else
2498*404b540aSrobert 	    {
2499*404b540aSrobert 	      cost = state_transition (temp_state, insn);
2500*404b540aSrobert 	      if (cost < 0)
2501*404b540aSrobert 		cost = 0;
2502*404b540aSrobert 	      else if (cost == 0)
2503*404b540aSrobert 		cost = 1;
2504*404b540aSrobert 	    }
2505*404b540aSrobert 
2506*404b540aSrobert 	  if (cost >= 1)
2507*404b540aSrobert 	    {
2508*404b540aSrobert 	      queue_insn (insn, cost);
2509*404b540aSrobert  	      if (SCHED_GROUP_P (insn))
2510*404b540aSrobert  		{
2511*404b540aSrobert  		  advance = cost;
2512*404b540aSrobert  		  break;
2513*404b540aSrobert  		}
2514*404b540aSrobert 
2515*404b540aSrobert 	      continue;
2516*404b540aSrobert 	    }
2517*404b540aSrobert 
2518*404b540aSrobert 	  if (current_sched_info->can_schedule_ready_p
2519*404b540aSrobert 	      && ! (*current_sched_info->can_schedule_ready_p) (insn))
2520*404b540aSrobert 	    /* We normally get here only if we don't want to move
2521*404b540aSrobert 	       the insn from the split block.  */
2522*404b540aSrobert 	    {
2523*404b540aSrobert 	      TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
2524*404b540aSrobert 	      continue;
2525*404b540aSrobert 	    }
2526*404b540aSrobert 
2527*404b540aSrobert 	  /* DECISION is made.  */
2528*404b540aSrobert 
2529*404b540aSrobert           if (TODO_SPEC (insn) & SPECULATIVE)
2530*404b540aSrobert             generate_recovery_code (insn);
2531*404b540aSrobert 
2532*404b540aSrobert 	  if (control_flow_insn_p (last_scheduled_insn)
2533*404b540aSrobert 	      /* This is used to switch basic blocks by request
2534*404b540aSrobert 		 from the scheduler front-end (actually, sched-ebb.c only).
2535*404b540aSrobert 		 This is used to process blocks with a single fallthru
2536*404b540aSrobert 		 edge.  If the succeeding block has a jump, that jump will
2537*404b540aSrobert 		 try to move to the end of the current bb, corrupting the CFG.  */
2538*404b540aSrobert 	      || current_sched_info->advance_target_bb (*target_bb, insn))
2539*404b540aSrobert 	    {
2540*404b540aSrobert 	      *target_bb = current_sched_info->advance_target_bb
2541*404b540aSrobert 		(*target_bb, 0);
2542*404b540aSrobert 
2543*404b540aSrobert 	      if (sched_verbose)
2544*404b540aSrobert 		{
2545*404b540aSrobert 		  rtx x;
2546*404b540aSrobert 
2547*404b540aSrobert 		  x = next_real_insn (last_scheduled_insn);
2548*404b540aSrobert 		  gcc_assert (x);
2549*404b540aSrobert 		  dump_new_block_header (1, *target_bb, x, tail);
2550*404b540aSrobert 		}
2551*404b540aSrobert 
2552*404b540aSrobert 	      last_scheduled_insn = bb_note (*target_bb);
2553*404b540aSrobert 	    }
2554*404b540aSrobert 
2555*404b540aSrobert 	  /* Update counters, etc in the scheduler's front end.  */
2556*404b540aSrobert 	  (*current_sched_info->begin_schedule_ready) (insn,
2557*404b540aSrobert 						       last_scheduled_insn);
2558*404b540aSrobert 
2559*404b540aSrobert 	  move_insn (insn);
2560*404b540aSrobert 	  last_scheduled_insn = insn;
2561*404b540aSrobert 
2562*404b540aSrobert 	  if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
2563*404b540aSrobert             {
2564*404b540aSrobert               cycle_issued_insns++;
2565*404b540aSrobert               memcpy (curr_state, temp_state, dfa_state_size);
2566*404b540aSrobert             }
2567*404b540aSrobert 
2568*404b540aSrobert 	  if (targetm.sched.variable_issue)
2569*404b540aSrobert 	    can_issue_more =
2570*404b540aSrobert 	      targetm.sched.variable_issue (sched_dump, sched_verbose,
2571*404b540aSrobert 					       insn, can_issue_more);
2572*404b540aSrobert 	  /* A naked CLOBBER or USE generates no instruction, so do
2573*404b540aSrobert 	     not count them against the issue rate.  */
2574*404b540aSrobert 	  else if (GET_CODE (PATTERN (insn)) != USE
2575*404b540aSrobert 		   && GET_CODE (PATTERN (insn)) != CLOBBER)
2576*404b540aSrobert 	    can_issue_more--;
2577*404b540aSrobert 
2578*404b540aSrobert 	  advance = schedule_insn (insn);
2579*404b540aSrobert 
2580*404b540aSrobert 	  /* After issuing an asm insn we should start a new cycle.  */
2581*404b540aSrobert 	  if (advance == 0 && asm_p)
2582*404b540aSrobert 	    advance = 1;
2583*404b540aSrobert 	  if (advance != 0)
2584*404b540aSrobert 	    break;
2585*404b540aSrobert 
2586*404b540aSrobert 	  first_cycle_insn_p = 0;
2587*404b540aSrobert 
2588*404b540aSrobert 	  /* Sort the ready list based on priority.  This must be
2589*404b540aSrobert 	     redone here, as schedule_insn may have readied additional
2590*404b540aSrobert 	     insns that will not be sorted correctly.  */
2591*404b540aSrobert 	  if (ready.n_ready > 0)
2592*404b540aSrobert 	    ready_sort (&ready);
2593*404b540aSrobert 
2594*404b540aSrobert 	  if (targetm.sched.reorder2
2595*404b540aSrobert 	      && (ready.n_ready == 0
2596*404b540aSrobert 		  || !SCHED_GROUP_P (ready_element (&ready, 0))))
2597*404b540aSrobert 	    {
2598*404b540aSrobert 	      can_issue_more =
2599*404b540aSrobert 		targetm.sched.reorder2 (sched_dump, sched_verbose,
2600*404b540aSrobert 					ready.n_ready
2601*404b540aSrobert 					? ready_lastpos (&ready) : NULL,
2602*404b540aSrobert 					&ready.n_ready, clock_var);
2603*404b540aSrobert 	    }
2604*404b540aSrobert 	}
2605*404b540aSrobert     }
2606*404b540aSrobert 
2607*404b540aSrobert   /* Debug info.  */
2608*404b540aSrobert   if (sched_verbose)
2609*404b540aSrobert     {
2610*404b540aSrobert       fprintf (sched_dump, ";;\tReady list (final):  ");
2611*404b540aSrobert       debug_ready_list (&ready);
2612*404b540aSrobert     }
2613*404b540aSrobert 
2614*404b540aSrobert   if (current_sched_info->queue_must_finish_empty)
2615*404b540aSrobert     /* Sanity check -- queue must be empty now.  Meaningless if region has
2616*404b540aSrobert        multiple bbs.  */
2617*404b540aSrobert     gcc_assert (!q_size && !ready.n_ready);
2618*404b540aSrobert   else
2619*404b540aSrobert     {
2620*404b540aSrobert       /* We must maintain QUEUE_INDEX between blocks in region.  */
2621*404b540aSrobert       for (i = ready.n_ready - 1; i >= 0; i--)
2622*404b540aSrobert 	{
2623*404b540aSrobert 	  rtx x;
2624*404b540aSrobert 
2625*404b540aSrobert 	  x = ready_element (&ready, i);
2626*404b540aSrobert 	  QUEUE_INDEX (x) = QUEUE_NOWHERE;
2627*404b540aSrobert 	  TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2628*404b540aSrobert 	}
2629*404b540aSrobert 
2630*404b540aSrobert       if (q_size)
2631*404b540aSrobert 	for (i = 0; i <= max_insn_queue_index; i++)
2632*404b540aSrobert 	  {
2633*404b540aSrobert 	    rtx link;
2634*404b540aSrobert 	    for (link = insn_queue[i]; link; link = XEXP (link, 1))
2635*404b540aSrobert 	      {
2636*404b540aSrobert 		rtx x;
2637*404b540aSrobert 
2638*404b540aSrobert 		x = XEXP (link, 0);
2639*404b540aSrobert 		QUEUE_INDEX (x) = QUEUE_NOWHERE;
2640*404b540aSrobert 		TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2641*404b540aSrobert 	      }
2642*404b540aSrobert 	    free_INSN_LIST_list (&insn_queue[i]);
2643*404b540aSrobert 	  }
2644*404b540aSrobert     }
2645*404b540aSrobert 
2646*404b540aSrobert   if (!current_sched_info->queue_must_finish_empty
2647*404b540aSrobert       || added_recovery_block_p)
2648*404b540aSrobert     {
2649*404b540aSrobert       /* INSN_TICK (minimum clock tick at which the insn becomes
2650*404b540aSrobert          ready) may not be correct for the insns in the subsequent
2651*404b540aSrobert          blocks of the region.  We should use a correct value of
2652*404b540aSrobert          `clock_var' or modify INSN_TICK.  It is better to keep
2653*404b540aSrobert          the clock_var value equal to 0 at the start of a basic block.
2654*404b540aSrobert          Therefore we modify INSN_TICK here.  */
2655*404b540aSrobert       fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
2656*404b540aSrobert     }
2657*404b540aSrobert 
2658*404b540aSrobert   if (targetm.sched.md_finish)
2659*404b540aSrobert     targetm.sched.md_finish (sched_dump, sched_verbose);
2660*404b540aSrobert 
2661*404b540aSrobert   /* Update head/tail boundaries.  */
2662*404b540aSrobert   head = NEXT_INSN (prev_head);
2663*404b540aSrobert   tail = last_scheduled_insn;
2664*404b540aSrobert 
2665*404b540aSrobert   /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
2666*404b540aSrobert      previously found among the insns.  Insert them at the beginning
2667*404b540aSrobert      of the insns.  */
2668*404b540aSrobert   if (note_list != 0)
2669*404b540aSrobert     {
2670*404b540aSrobert       basic_block head_bb = BLOCK_FOR_INSN (head);
2671*404b540aSrobert       rtx note_head = note_list;
2672*404b540aSrobert 
2673*404b540aSrobert       while (PREV_INSN (note_head))
2674*404b540aSrobert 	{
2675*404b540aSrobert 	  set_block_for_insn (note_head, head_bb);
2676*404b540aSrobert 	  note_head = PREV_INSN (note_head);
2677*404b540aSrobert 	}
2678*404b540aSrobert       /* In the loop above we've missed this note:  */
2679*404b540aSrobert       set_block_for_insn (note_head, head_bb);
2680*404b540aSrobert 
2681*404b540aSrobert       PREV_INSN (note_head) = PREV_INSN (head);
2682*404b540aSrobert       NEXT_INSN (PREV_INSN (head)) = note_head;
2683*404b540aSrobert       PREV_INSN (head) = note_list;
2684*404b540aSrobert       NEXT_INSN (note_list) = head;
2685*404b540aSrobert       head = note_head;
2686*404b540aSrobert     }
2687*404b540aSrobert 
2688*404b540aSrobert   /* Debugging.  */
2689*404b540aSrobert   if (sched_verbose)
2690*404b540aSrobert     {
2691*404b540aSrobert       fprintf (sched_dump, ";;   total time = %d\n;;   new head = %d\n",
2692*404b540aSrobert 	       clock_var, INSN_UID (head));
2693*404b540aSrobert       fprintf (sched_dump, ";;   new tail = %d\n\n",
2694*404b540aSrobert 	       INSN_UID (tail));
2695*404b540aSrobert     }
2696*404b540aSrobert 
2697*404b540aSrobert   current_sched_info->head = head;
2698*404b540aSrobert   current_sched_info->tail = tail;
2699*404b540aSrobert 
2700*404b540aSrobert   free (ready.vec);
2701*404b540aSrobert 
2702*404b540aSrobert   free (ready_try);
2703*404b540aSrobert   for (i = 0; i <= rgn_n_insns; i++)
2704*404b540aSrobert     free (choice_stack [i].state);
2705*404b540aSrobert   free (choice_stack);
2706*404b540aSrobert }
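
/* Usage sketch (ours): a scheduler front end such as sched-rgn.c is
   expected to set current_sched_info->prev_head and ->next_tail for the
   block and then call, roughly,

       schedule_block (&curr_bb, rgn_n_insns);

   reading back current_sched_info->head and ->tail afterwards, which
   delimit the reordered insn sequence.  */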
2707*404b540aSrobert 
2708*404b540aSrobert /* Set_priorities: compute priority of each insn in the block.  */
2709*404b540aSrobert 
2710*404b540aSrobert int
2711*404b540aSrobert set_priorities (rtx head, rtx tail)
2712*404b540aSrobert {
2713*404b540aSrobert   rtx insn;
2714*404b540aSrobert   int n_insn;
2715*404b540aSrobert   int sched_max_insns_priority =
2716*404b540aSrobert 	current_sched_info->sched_max_insns_priority;
2717*404b540aSrobert   rtx prev_head;
2718*404b540aSrobert 
2719*404b540aSrobert   if (head == tail && (! INSN_P (head)))
2720*404b540aSrobert     return 0;
2721*404b540aSrobert 
2722*404b540aSrobert   n_insn = 0;
2723*404b540aSrobert 
2724*404b540aSrobert   prev_head = PREV_INSN (head);
2725*404b540aSrobert   for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
2726*404b540aSrobert     {
2727*404b540aSrobert       if (!INSN_P (insn))
2728*404b540aSrobert 	continue;
2729*404b540aSrobert 
2730*404b540aSrobert       n_insn++;
2731*404b540aSrobert       (void) priority (insn);
2732*404b540aSrobert 
2733*404b540aSrobert       if (INSN_PRIORITY_KNOWN (insn))
2734*404b540aSrobert 	sched_max_insns_priority =
2735*404b540aSrobert 	  MAX (sched_max_insns_priority, INSN_PRIORITY (insn));
2736*404b540aSrobert     }
2737*404b540aSrobert 
2738*404b540aSrobert   current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
2739*404b540aSrobert 
2740*404b540aSrobert   return n_insn;
2741*404b540aSrobert }
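
/* Usage sketch (ours): front ends typically call this once per block
   before scheduling, along the lines of

       n_insns = set_priorities (head, tail);

   which both seeds INSN_PRIORITY for every insn between HEAD and TAIL
   and returns the number of insns the block contributes.  */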
2742*404b540aSrobert 
2743*404b540aSrobert /* Next LUID to assign to an instruction.  */
2744*404b540aSrobert static int luid;
2745*404b540aSrobert 
2746*404b540aSrobert /* Initialize some global state for the scheduler.  */
2747*404b540aSrobert 
2748*404b540aSrobert void
2749*404b540aSrobert sched_init (void)
2750*404b540aSrobert {
2751*404b540aSrobert   basic_block b;
2752*404b540aSrobert   rtx insn;
2753*404b540aSrobert   int i;
2754*404b540aSrobert 
2755*404b540aSrobert   /* Switch to working copy of sched_info.  */
2756*404b540aSrobert   memcpy (&current_sched_info_var, current_sched_info,
2757*404b540aSrobert 	  sizeof (current_sched_info_var));
2758*404b540aSrobert   current_sched_info = &current_sched_info_var;
2759*404b540aSrobert 
2760*404b540aSrobert   /* Disable speculative loads if cc0 is defined.  */
2761*404b540aSrobert #ifdef HAVE_cc0
2762*404b540aSrobert   flag_schedule_speculative_load = 0;
2763*404b540aSrobert #endif
2764*404b540aSrobert 
2765*404b540aSrobert   /* Set dump and sched_verbose for the desired debugging output.  If no
2766*404b540aSrobert      dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
2767*404b540aSrobert      For -fsched-verbose=N, N>=10, print everything to stderr.  */
2768*404b540aSrobert   sched_verbose = sched_verbose_param;
2769*404b540aSrobert   if (sched_verbose_param == 0 && dump_file)
2770*404b540aSrobert     sched_verbose = 1;
2771*404b540aSrobert   sched_dump = ((sched_verbose_param >= 10 || !dump_file)
2772*404b540aSrobert 		? stderr : dump_file);
2773*404b540aSrobert 
2774*404b540aSrobert   /* Initialize SPEC_INFO.  */
2775*404b540aSrobert   if (targetm.sched.set_sched_flags)
2776*404b540aSrobert     {
2777*404b540aSrobert       spec_info = &spec_info_var;
2778*404b540aSrobert       targetm.sched.set_sched_flags (spec_info);
2779*404b540aSrobert       if (current_sched_info->flags & DO_SPECULATION)
2780*404b540aSrobert 	spec_info->weakness_cutoff =
2781*404b540aSrobert 	  (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
2782*404b540aSrobert       else
2783*404b540aSrobert 	/* So we won't read anything accidentally.  */
2784*404b540aSrobert 	spec_info = 0;
2785*404b540aSrobert #ifdef ENABLE_CHECKING
2786*404b540aSrobert       check_sched_flags ();
2787*404b540aSrobert #endif
2788*404b540aSrobert     }
2789*404b540aSrobert   else
2790*404b540aSrobert     /* So we won't read anything accidentally.  */
2791*404b540aSrobert     spec_info = 0;
2792*404b540aSrobert 
2793*404b540aSrobert   /* Initialize issue_rate.  */
2794*404b540aSrobert   if (targetm.sched.issue_rate)
2795*404b540aSrobert     issue_rate = targetm.sched.issue_rate ();
2796*404b540aSrobert   else
2797*404b540aSrobert     issue_rate = 1;
2798*404b540aSrobert 
2799*404b540aSrobert   if (cached_issue_rate != issue_rate)
2800*404b540aSrobert     {
2801*404b540aSrobert       cached_issue_rate = issue_rate;
2802*404b540aSrobert       /* To invalidate max_lookahead_tries:  */
2803*404b540aSrobert       cached_first_cycle_multipass_dfa_lookahead = 0;
2804*404b540aSrobert     }
2805*404b540aSrobert 
2806*404b540aSrobert   old_max_uid = 0;
2807*404b540aSrobert   h_i_d = 0;
2808*404b540aSrobert   extend_h_i_d ();
2809*404b540aSrobert 
2810*404b540aSrobert   for (i = 0; i < old_max_uid; i++)
2811*404b540aSrobert     {
2812*404b540aSrobert       h_i_d[i].cost = -1;
2813*404b540aSrobert       h_i_d[i].todo_spec = HARD_DEP;
2814*404b540aSrobert       h_i_d[i].queue_index = QUEUE_NOWHERE;
2815*404b540aSrobert       h_i_d[i].tick = INVALID_TICK;
2816*404b540aSrobert       h_i_d[i].inter_tick = INVALID_TICK;
2817*404b540aSrobert     }
2818*404b540aSrobert 
2819*404b540aSrobert   if (targetm.sched.init_dfa_pre_cycle_insn)
2820*404b540aSrobert     targetm.sched.init_dfa_pre_cycle_insn ();
2821*404b540aSrobert 
2822*404b540aSrobert   if (targetm.sched.init_dfa_post_cycle_insn)
2823*404b540aSrobert     targetm.sched.init_dfa_post_cycle_insn ();
2824*404b540aSrobert 
2825*404b540aSrobert   dfa_start ();
2826*404b540aSrobert   dfa_state_size = state_size ();
2827*404b540aSrobert   curr_state = xmalloc (dfa_state_size);
2828*404b540aSrobert 
2829*404b540aSrobert   h_i_d[0].luid = 0;
2830*404b540aSrobert   luid = 1;
2831*404b540aSrobert   FOR_EACH_BB (b)
2832*404b540aSrobert     for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
2833*404b540aSrobert       {
2834*404b540aSrobert 	INSN_LUID (insn) = luid;
2835*404b540aSrobert 
2836*404b540aSrobert 	/* Increment the next luid, unless this is a note.  We don't
2837*404b540aSrobert 	   really need separate IDs for notes and we don't want to
2838*404b540aSrobert 	   schedule differently depending on whether or not there are
2839*404b540aSrobert 	   line-number notes, i.e., depending on whether or not we're
2840*404b540aSrobert 	   generating debugging information.  */
2841*404b540aSrobert 	if (!NOTE_P (insn))
2842*404b540aSrobert 	  ++luid;
2843*404b540aSrobert 
2844*404b540aSrobert 	if (insn == BB_END (b))
2845*404b540aSrobert 	  break;
2846*404b540aSrobert       }
2847*404b540aSrobert 
2848*404b540aSrobert   init_dependency_caches (luid);
2849*404b540aSrobert 
2850*404b540aSrobert   init_alias_analysis ();
2851*404b540aSrobert 
2852*404b540aSrobert   line_note_head = 0;
2853*404b540aSrobert   old_last_basic_block = 0;
2854*404b540aSrobert   glat_start = 0;
2855*404b540aSrobert   glat_end = 0;
2856*404b540aSrobert   extend_bb (0);
2857*404b540aSrobert 
2858*404b540aSrobert   if (current_sched_info->flags & USE_GLAT)
2859*404b540aSrobert     init_glat ();
2860*404b540aSrobert 
2861*404b540aSrobert   /* Compute INSN_REG_WEIGHT for all blocks.  We must do this before
2862*404b540aSrobert      removing death notes.  */
2863*404b540aSrobert   FOR_EACH_BB_REVERSE (b)
2864*404b540aSrobert     find_insn_reg_weight (b);
2865*404b540aSrobert 
2866*404b540aSrobert   if (targetm.sched.md_init_global)
2867*404b540aSrobert       targetm.sched.md_init_global (sched_dump, sched_verbose, old_max_uid);
2868*404b540aSrobert 
2869*404b540aSrobert   nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
2870*404b540aSrobert   before_recovery = 0;
2871*404b540aSrobert 
2872*404b540aSrobert #ifdef ENABLE_CHECKING
2873*404b540aSrobert   /* This is mainly useful for finding bugs in check_cfg () itself.  */
2874*404b540aSrobert   check_cfg (0, 0);
2875*404b540aSrobert #endif
2876*404b540aSrobert }
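
/* Worked example (ours) of the dump selection above: with a dump file
   present, -fsched-verbose=4 yields

       sched_verbose == 4, sched_dump == dump_file

   while -fsched-verbose=10 and above, or any verbosity with no dump
   file at all, sends everything to stderr.  */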
2877*404b540aSrobert 
2878*404b540aSrobert /* Free global data used during insn scheduling.  */
2879*404b540aSrobert 
2880*404b540aSrobert void
2881*404b540aSrobert sched_finish (void)
2882*404b540aSrobert {
2883*404b540aSrobert   free (h_i_d);
2884*404b540aSrobert   free (curr_state);
2885*404b540aSrobert   dfa_finish ();
2886*404b540aSrobert   free_dependency_caches ();
2887*404b540aSrobert   end_alias_analysis ();
2888*404b540aSrobert   free (line_note_head);
2889*404b540aSrobert   free_glat ();
2890*404b540aSrobert 
2891*404b540aSrobert   if (targetm.sched.md_finish_global)
2892*404b540aSrobert     targetm.sched.md_finish_global (sched_dump, sched_verbose);
2893*404b540aSrobert 
2894*404b540aSrobert   if (spec_info && spec_info->dump)
2895*404b540aSrobert     {
2896*404b540aSrobert       char c = reload_completed ? 'a' : 'b';
2897*404b540aSrobert 
2898*404b540aSrobert       fprintf (spec_info->dump,
2899*404b540aSrobert 	       ";; %s:\n", current_function_name ());
2900*404b540aSrobert 
2901*404b540aSrobert       fprintf (spec_info->dump,
2902*404b540aSrobert                ";; Procedure %cr-begin-data-spec motions == %d\n",
2903*404b540aSrobert                c, nr_begin_data);
2904*404b540aSrobert       fprintf (spec_info->dump,
2905*404b540aSrobert                ";; Procedure %cr-be-in-data-spec motions == %d\n",
2906*404b540aSrobert                c, nr_be_in_data);
2907*404b540aSrobert       fprintf (spec_info->dump,
2908*404b540aSrobert                ";; Procedure %cr-begin-control-spec motions == %d\n",
2909*404b540aSrobert                c, nr_begin_control);
2910*404b540aSrobert       fprintf (spec_info->dump,
2911*404b540aSrobert                ";; Procedure %cr-be-in-control-spec motions == %d\n",
2912*404b540aSrobert                c, nr_be_in_control);
2913*404b540aSrobert     }
2914*404b540aSrobert 
2915*404b540aSrobert #ifdef ENABLE_CHECKING
2916*404b540aSrobert   /* After reload the ia64 back end clobbers the CFG, so we can't check anything.  */
2917*404b540aSrobert   if (!reload_completed)
2918*404b540aSrobert     check_cfg (0, 0);
2919*404b540aSrobert #endif
2920*404b540aSrobert 
2921*404b540aSrobert   current_sched_info = NULL;
2922*404b540aSrobert }
2923*404b540aSrobert 
2924*404b540aSrobert /* Fix INSN_TICKs of the instructions in the current block as well as
2925*404b540aSrobert    INSN_TICKs of their dependents.
2926*404b540aSrobert    HEAD and TAIL are the beginning and the end of the currently scheduled block.  */
2927*404b540aSrobert static void
2928*404b540aSrobert fix_inter_tick (rtx head, rtx tail)
2929*404b540aSrobert {
2930*404b540aSrobert   /* Set of instructions with corrected INSN_TICK.  */
2931*404b540aSrobert   bitmap_head processed;
2932*404b540aSrobert   int next_clock = clock_var + 1;
2933*404b540aSrobert 
2934*404b540aSrobert   bitmap_initialize (&processed, 0);
2935*404b540aSrobert 
2936*404b540aSrobert   /* Iterate over the scheduled instructions and fix their INSN_TICKs, as
2937*404b540aSrobert      well as the INSN_TICKs of their dependents, so that INSN_TICKs stay
2938*404b540aSrobert      consistent across different blocks.  */
2939*404b540aSrobert   for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
2940*404b540aSrobert     {
2941*404b540aSrobert       if (INSN_P (head))
2942*404b540aSrobert 	{
2943*404b540aSrobert 	  int tick;
2944*404b540aSrobert 	  rtx link;
2945*404b540aSrobert 
2946*404b540aSrobert 	  tick = INSN_TICK (head);
2947*404b540aSrobert 	  gcc_assert (tick >= MIN_TICK);
2948*404b540aSrobert 
2949*404b540aSrobert 	  /* Fix INSN_TICK of instruction from just scheduled block.  */
2950*404b540aSrobert 	  if (!bitmap_bit_p (&processed, INSN_LUID (head)))
2951*404b540aSrobert 	    {
2952*404b540aSrobert 	      bitmap_set_bit (&processed, INSN_LUID (head));
2953*404b540aSrobert 	      tick -= next_clock;
2954*404b540aSrobert 
2955*404b540aSrobert 	      if (tick < MIN_TICK)
2956*404b540aSrobert 		tick = MIN_TICK;
2957*404b540aSrobert 
2958*404b540aSrobert 	      INSN_TICK (head) = tick;
2959*404b540aSrobert 	    }
2960*404b540aSrobert 
2961*404b540aSrobert 	  for (link = INSN_DEPEND (head); link; link = XEXP (link, 1))
2962*404b540aSrobert 	    {
2963*404b540aSrobert 	      rtx next;
2964*404b540aSrobert 
2965*404b540aSrobert 	      next = XEXP (link, 0);
2966*404b540aSrobert 	      tick = INSN_TICK (next);
2967*404b540aSrobert 
2968*404b540aSrobert 	      if (tick != INVALID_TICK
2969*404b540aSrobert 		  /* If NEXT has its INSN_TICK calculated, fix it.
2970*404b540aSrobert 		     If not - it will be properly calculated from
2971*404b540aSrobert 		     scratch later in fix_tick_ready.  */
2972*404b540aSrobert 		  && !bitmap_bit_p (&processed, INSN_LUID (next)))
2973*404b540aSrobert 		{
2974*404b540aSrobert 		  bitmap_set_bit (&processed, INSN_LUID (next));
2975*404b540aSrobert 		  tick -= next_clock;
2976*404b540aSrobert 
2977*404b540aSrobert 		  if (tick < MIN_TICK)
2978*404b540aSrobert 		    tick = MIN_TICK;
2979*404b540aSrobert 
2980*404b540aSrobert 		  if (tick > INTER_TICK (next))
2981*404b540aSrobert 		    INTER_TICK (next) = tick;
2982*404b540aSrobert 		  else
2983*404b540aSrobert 		    tick = INTER_TICK (next);
2984*404b540aSrobert 
2985*404b540aSrobert 		  INSN_TICK (next) = tick;
2986*404b540aSrobert 		}
2987*404b540aSrobert 	    }
2988*404b540aSrobert 	}
2989*404b540aSrobert     }
2990*404b540aSrobert   bitmap_clear (&processed);
2991*404b540aSrobert }
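
/* Worked example (ours): if the block just scheduled finished at
   clock_var == 7, then next_clock == 8, and an insn scheduled at tick 10
   in the old time frame is rebased to

       INSN_TICK == 10 - 8 == 2

   i.e. ready two cycles into the next block; results below MIN_TICK are
   clamped to MIN_TICK.  */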
2992*404b540aSrobert 
2993*404b540aSrobert /* Check if NEXT is ready to be added to the ready or queue list.
2994*404b540aSrobert    If "yes", add it to the proper list.
2995*404b540aSrobert    Returns:
2996*404b540aSrobert       -1 - is not ready yet,
2997*404b540aSrobert        0 - added to the ready list,
2998*404b540aSrobert    0 < N - queued for N cycles.  */
2999*404b540aSrobert int
3000*404b540aSrobert try_ready (rtx next)
3001*404b540aSrobert {
3002*404b540aSrobert   ds_t old_ts, *ts;
3003*404b540aSrobert   rtx link;
3004*404b540aSrobert 
3005*404b540aSrobert   ts = &TODO_SPEC (next);
3006*404b540aSrobert   old_ts = *ts;
3007*404b540aSrobert 
3008*404b540aSrobert   gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
3009*404b540aSrobert 	      && ((old_ts & HARD_DEP)
3010*404b540aSrobert 		  || (old_ts & SPECULATIVE)));
3011*404b540aSrobert 
3012*404b540aSrobert   if (!(current_sched_info->flags & DO_SPECULATION))
3013*404b540aSrobert     {
3014*404b540aSrobert       if (!LOG_LINKS (next))
3015*404b540aSrobert         *ts &= ~HARD_DEP;
3016*404b540aSrobert     }
3017*404b540aSrobert   else
3018*404b540aSrobert     {
3019*404b540aSrobert       *ts &= ~SPECULATIVE & ~HARD_DEP;
3020*404b540aSrobert 
3021*404b540aSrobert       link = LOG_LINKS (next);
3022*404b540aSrobert       if (link)
3023*404b540aSrobert         {
3024*404b540aSrobert           /* LOG_LINKS are maintained sorted.
3025*404b540aSrobert              So if DEP_STATUS of the first dep is SPECULATIVE,
3026*404b540aSrobert              then all other deps are speculative too.  */
3027*404b540aSrobert           if (DEP_STATUS (link) & SPECULATIVE)
3028*404b540aSrobert             {
3029*404b540aSrobert               /* Now we've got NEXT with speculative deps only.
3030*404b540aSrobert                  1. Look at the deps to see what we have to do.
3031*404b540aSrobert                  2. Check if we can do 'todo'.  */
3032*404b540aSrobert 	      *ts = DEP_STATUS (link) & SPECULATIVE;
3033*404b540aSrobert               while ((link = XEXP (link, 1)))
3034*404b540aSrobert 		*ts = ds_merge (*ts, DEP_STATUS (link) & SPECULATIVE);
3035*404b540aSrobert 
3036*404b540aSrobert 	      if (dep_weak (*ts) < spec_info->weakness_cutoff)
3037*404b540aSrobert 		/* Too few points.  */
3038*404b540aSrobert 		*ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3039*404b540aSrobert 	    }
3040*404b540aSrobert           else
3041*404b540aSrobert             *ts |= HARD_DEP;
3042*404b540aSrobert         }
3043*404b540aSrobert     }
3044*404b540aSrobert 
3045*404b540aSrobert   if (*ts & HARD_DEP)
3046*404b540aSrobert     gcc_assert (*ts == old_ts
3047*404b540aSrobert 		&& QUEUE_INDEX (next) == QUEUE_NOWHERE);
3048*404b540aSrobert   else if (current_sched_info->new_ready)
3049*404b540aSrobert     *ts = current_sched_info->new_ready (next, *ts);
3050*404b540aSrobert 
3051*404b540aSrobert   /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then the insn
3052*404b540aSrobert      might have either its original pattern or a changed (speculative)
3053*404b540aSrobert      one.  This is due to changing ebbs in region scheduling.
3054*404b540aSrobert      * But if (old_ts & SPECULATIVE), then we are pretty sure that the
3055*404b540aSrobert      insn has a speculative pattern.
3056*404b540aSrobert 
3057*404b540aSrobert      We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
3058*404b540aSrobert      control-speculative NEXT could have been discarded by sched-rgn.c
3059*404b540aSrobert      (the same case as when discarded by can_schedule_ready_p ()).  */
3060*404b540aSrobert 
3061*404b540aSrobert   if ((*ts & SPECULATIVE)
3062*404b540aSrobert       /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3063*404b540aSrobert 	 need to change anything.  */
3064*404b540aSrobert       && *ts != old_ts)
3065*404b540aSrobert     {
3066*404b540aSrobert       int res;
3067*404b540aSrobert       rtx new_pat;
3068*404b540aSrobert 
3069*404b540aSrobert       gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3070*404b540aSrobert 
3071*404b540aSrobert       res = speculate_insn (next, *ts, &new_pat);
3072*404b540aSrobert 
3073*404b540aSrobert       switch (res)
3074*404b540aSrobert 	{
3075*404b540aSrobert 	case -1:
3076*404b540aSrobert 	  /* It would be nice to change DEP_STATUS of all dependences,
3077*404b540aSrobert 	     which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3078*404b540aSrobert 	     so we won't reanalyze anything.  */
3079*404b540aSrobert 	  *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3080*404b540aSrobert 	  break;
3081*404b540aSrobert 
3082*404b540aSrobert 	case 0:
3083*404b540aSrobert 	  /* We follow the rule, that every speculative insn
3084*404b540aSrobert 	     has non-null ORIG_PAT.  */
3085*404b540aSrobert 	  if (!ORIG_PAT (next))
3086*404b540aSrobert 	    ORIG_PAT (next) = PATTERN (next);
3087*404b540aSrobert 	  break;
3088*404b540aSrobert 
3089*404b540aSrobert 	case 1:
3090*404b540aSrobert 	  if (!ORIG_PAT (next))
3091*404b540aSrobert 	    /* If we are going to overwrite the original pattern of the
3092*404b540aSrobert 	       insn, save it.  */
3093*404b540aSrobert 	    ORIG_PAT (next) = PATTERN (next);
3094*404b540aSrobert 
3095*404b540aSrobert 	  change_pattern (next, new_pat);
3096*404b540aSrobert 	  break;
3097*404b540aSrobert 
3098*404b540aSrobert 	default:
3099*404b540aSrobert 	  gcc_unreachable ();
3100*404b540aSrobert 	}
3101*404b540aSrobert     }
3102*404b540aSrobert 
3103*404b540aSrobert   /* We need to restore pattern only if (*ts == 0), because otherwise it is
3104*404b540aSrobert      either correct (*ts & SPECULATIVE),
3105*404b540aSrobert      or we simply don't care (*ts & HARD_DEP).  */
3106*404b540aSrobert 
3107*404b540aSrobert   gcc_assert (!ORIG_PAT (next)
3108*404b540aSrobert 	      || !IS_SPECULATION_BRANCHY_CHECK_P (next));
3109*404b540aSrobert 
3110*404b540aSrobert   if (*ts & HARD_DEP)
3111*404b540aSrobert     {
3112*404b540aSrobert       /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3113*404b540aSrobert 	 control-speculative NEXT could have been discarded by sched-rgn.c
3114*404b540aSrobert 	 (the same case as when discarded by can_schedule_ready_p ()).  */
3115*404b540aSrobert       /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3116*404b540aSrobert 
3117*404b540aSrobert       change_queue_index (next, QUEUE_NOWHERE);
3118*404b540aSrobert       return -1;
3119*404b540aSrobert     }
3120*404b540aSrobert   else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
3121*404b540aSrobert     /* We should change the pattern of every previously speculative
3122*404b540aSrobert        instruction - and we determine if NEXT was speculative by using
3123*404b540aSrobert        the ORIG_PAT field.  Except in one case - speculation checks have
3124*404b540aSrobert        ORIG_PAT set too, so skip them.  */
3125*404b540aSrobert     {
3126*404b540aSrobert       change_pattern (next, ORIG_PAT (next));
3127*404b540aSrobert       ORIG_PAT (next) = 0;
3128*404b540aSrobert     }
3129*404b540aSrobert 
3130*404b540aSrobert   if (sched_verbose >= 2)
3131*404b540aSrobert     {
3132*404b540aSrobert       int s = TODO_SPEC (next);
3133*404b540aSrobert 
3134*404b540aSrobert       fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3135*404b540aSrobert                (*current_sched_info->print_insn) (next, 0));
3136*404b540aSrobert 
3137*404b540aSrobert       if (spec_info && spec_info->dump)
3138*404b540aSrobert         {
3139*404b540aSrobert           if (s & BEGIN_DATA)
3140*404b540aSrobert             fprintf (spec_info->dump, "; data-spec;");
3141*404b540aSrobert           if (s & BEGIN_CONTROL)
3142*404b540aSrobert             fprintf (spec_info->dump, "; control-spec;");
3143*404b540aSrobert           if (s & BE_IN_CONTROL)
3144*404b540aSrobert             fprintf (spec_info->dump, "; in-control-spec;");
3145*404b540aSrobert         }
3146*404b540aSrobert 
3147*404b540aSrobert       fprintf (sched_dump, "\n");
3148*404b540aSrobert     }
3149*404b540aSrobert 
3150*404b540aSrobert   adjust_priority (next);
3151*404b540aSrobert 
3152*404b540aSrobert   return fix_tick_ready (next);
3153*404b540aSrobert }
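
/* Usage sketch (ours): callers treat the return value as a delay.
   Roughly,

       int delay = try_ready (next);

   leaves NEXT untouched when delay == -1 (unresolved dependencies
   remain), on the ready list when delay == 0, and in the queue for
   DELAY cycles when delay > 0.  */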
3154*404b540aSrobert 
3155*404b540aSrobert /* Calculate INSN_TICK of NEXT and add it to either ready or queue list.  */
3156*404b540aSrobert static int
3157*404b540aSrobert fix_tick_ready (rtx next)
3158*404b540aSrobert {
3159*404b540aSrobert   rtx link;
3160*404b540aSrobert   int tick, delay;
3161*404b540aSrobert 
3162*404b540aSrobert   link = RESOLVED_DEPS (next);
3163*404b540aSrobert 
3164*404b540aSrobert   if (link)
3165*404b540aSrobert     {
3166*404b540aSrobert       int full_p;
3167*404b540aSrobert 
3168*404b540aSrobert       tick = INSN_TICK (next);
3169*404b540aSrobert       /* If TICK is not equal to INVALID_TICK, then update
3170*404b540aSrobert 	 INSN_TICK of NEXT with the most recent resolved dependence
3171*404b540aSrobert 	 cost.  Otherwise, recalculate from scratch.  */
3172*404b540aSrobert       full_p = tick == INVALID_TICK;
3173*404b540aSrobert       do
3174*404b540aSrobert         {
3175*404b540aSrobert           rtx pro;
3176*404b540aSrobert           int tick1;
3177*404b540aSrobert 
3178*404b540aSrobert           pro = XEXP (link, 0);
3179*404b540aSrobert 	  gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3180*404b540aSrobert 
3181*404b540aSrobert           tick1 = INSN_TICK (pro) + insn_cost (pro, link, next);
3182*404b540aSrobert           if (tick1 > tick)
3183*404b540aSrobert             tick = tick1;
3184*404b540aSrobert         }
3185*404b540aSrobert       while ((link = XEXP (link, 1)) && full_p);
3186*404b540aSrobert     }
3187*404b540aSrobert   else
3188*404b540aSrobert     tick = -1;
3189*404b540aSrobert 
3190*404b540aSrobert   INSN_TICK (next) = tick;
3191*404b540aSrobert 
3192*404b540aSrobert   delay = tick - clock_var;
3193*404b540aSrobert   if (delay <= 0)
3194*404b540aSrobert     delay = QUEUE_READY;
3195*404b540aSrobert 
3196*404b540aSrobert   change_queue_index (next, delay);
3197*404b540aSrobert 
3198*404b540aSrobert   return delay;
3199*404b540aSrobert }
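
/* Worked example (ours): suppose NEXT has two resolved dependences, one
   on PRO1 with INSN_TICK == 3 and cost 2, the other on PRO2 with
   INSN_TICK == 4 and cost 1.  Then

       tick  = MAX (3 + 2, 4 + 1);    i.e. 5
       delay = tick - clock_var;

   so at clock_var == 4 NEXT is queued for one cycle, while at
   clock_var >= 5 it goes straight to the ready list (QUEUE_READY).  */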
3200*404b540aSrobert 
3201*404b540aSrobert /* Move NEXT to the proper queue list with (DELAY >= 1),
3202*404b540aSrobert    or add it to the ready list (DELAY == QUEUE_READY),
3203*404b540aSrobert    or remove it from both the ready and queue lists (DELAY == QUEUE_NOWHERE).  */
3204*404b540aSrobert static void
3205*404b540aSrobert change_queue_index (rtx next, int delay)
3206*404b540aSrobert {
3207*404b540aSrobert   int i = QUEUE_INDEX (next);
3208*404b540aSrobert 
3209*404b540aSrobert   gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3210*404b540aSrobert 	      && delay != 0);
3211*404b540aSrobert   gcc_assert (i != QUEUE_SCHEDULED);
3212*404b540aSrobert 
3213*404b540aSrobert   if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3214*404b540aSrobert       || (delay < 0 && delay == i))
3215*404b540aSrobert     /* We have nothing to do.  */
3216*404b540aSrobert     return;
3217*404b540aSrobert 
3218*404b540aSrobert   /* Remove NEXT from wherever it is now.  */
3219*404b540aSrobert   if (i == QUEUE_READY)
3220*404b540aSrobert     ready_remove_insn (next);
3221*404b540aSrobert   else if (i >= 0)
3222*404b540aSrobert     queue_remove (next);
3223*404b540aSrobert 
3224*404b540aSrobert   /* Add it to the proper place.  */
3225*404b540aSrobert   if (delay == QUEUE_READY)
3226*404b540aSrobert     ready_add (readyp, next, false);
3227*404b540aSrobert   else if (delay >= 1)
3228*404b540aSrobert     queue_insn (next, delay);
3229*404b540aSrobert 
3230*404b540aSrobert   if (sched_verbose >= 2)
3231*404b540aSrobert     {
3232*404b540aSrobert       fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3233*404b540aSrobert 	       (*current_sched_info->print_insn) (next, 0));
3234*404b540aSrobert 
3235*404b540aSrobert       if (delay == QUEUE_READY)
3236*404b540aSrobert 	fprintf (sched_dump, " into ready\n");
3237*404b540aSrobert       else if (delay >= 1)
3238*404b540aSrobert 	fprintf (sched_dump, " into queue with cost=%d\n", delay);
3239*404b540aSrobert       else
3240*404b540aSrobert 	fprintf (sched_dump, " removed from ready or queue lists\n");
3241*404b540aSrobert     }
3242*404b540aSrobert }
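
/* Usage sketch (ours): choose_ready above uses

       change_queue_index (insn, 1);

   to push a rejected speculative insn from the head of the ready list
   into the queue for one cycle, while try_ready uses
   change_queue_index (next, QUEUE_NOWHERE) to park an insn with hard
   dependencies outside both lists.  */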
3243*404b540aSrobert 
3244*404b540aSrobert /* INSN is being scheduled.  Resolve the dependence between INSN and NEXT.  */
3245*404b540aSrobert static void
3246*404b540aSrobert resolve_dep (rtx next, rtx insn)
3247*404b540aSrobert {
3248*404b540aSrobert   rtx dep;
3249*404b540aSrobert 
3250*404b540aSrobert   INSN_DEP_COUNT (next)--;
3251*404b540aSrobert 
3252*404b540aSrobert   dep = remove_list_elem (insn, &LOG_LINKS (next));
3253*404b540aSrobert   XEXP (dep, 1) = RESOLVED_DEPS (next);
3254*404b540aSrobert   RESOLVED_DEPS (next) = dep;
3255*404b540aSrobert 
3256*404b540aSrobert   gcc_assert ((INSN_DEP_COUNT (next) != 0 || !LOG_LINKS (next))
3257*404b540aSrobert 	      && (LOG_LINKS (next) || INSN_DEP_COUNT (next) == 0));
3258*404b540aSrobert }
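
/* Sketch (ours): when INSN is scheduled, its forward dependences are
   resolved one at a time, roughly

       resolve_dep (next, insn);
       try_ready (next);

   where try_ready re-examines LOG_LINKS itself, so NEXT becomes
   eligible exactly when its last pending dependence has migrated to
   RESOLVED_DEPS.  */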
3259*404b540aSrobert 
3260*404b540aSrobert /* Extend H_I_D data.  */
3261*404b540aSrobert static void
3262*404b540aSrobert extend_h_i_d (void)
3263*404b540aSrobert {
3264*404b540aSrobert   /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
3265*404b540aSrobert      pseudos which do not cross calls.  */
3266*404b540aSrobert   int new_max_uid = get_max_uid () + 1;
3267*404b540aSrobert 
3268*404b540aSrobert   h_i_d = xrecalloc (h_i_d, new_max_uid, old_max_uid, sizeof (*h_i_d));
3269*404b540aSrobert   old_max_uid = new_max_uid;
3270*404b540aSrobert 
3271*404b540aSrobert   if (targetm.sched.h_i_d_extended)
3272*404b540aSrobert     targetm.sched.h_i_d_extended ();
3273*404b540aSrobert }
3274*404b540aSrobert 
3275*404b540aSrobert /* Extend READY, READY_TRY and CHOICE_STACK arrays.
3276*404b540aSrobert    N_NEW_INSNS is the number of additional elements to allocate.  */
3277*404b540aSrobert static void
3278*404b540aSrobert extend_ready (int n_new_insns)
3279*404b540aSrobert {
3280*404b540aSrobert   int i;
3281*404b540aSrobert 
3282*404b540aSrobert   readyp->veclen = rgn_n_insns + n_new_insns + 1 + issue_rate;
3283*404b540aSrobert   readyp->vec = XRESIZEVEC (rtx, readyp->vec, readyp->veclen);
3284*404b540aSrobert 
3285*404b540aSrobert   ready_try = xrecalloc (ready_try, rgn_n_insns + n_new_insns + 1,
3286*404b540aSrobert 			 rgn_n_insns + 1, sizeof (char));
3287*404b540aSrobert 
3288*404b540aSrobert   rgn_n_insns += n_new_insns;
3289*404b540aSrobert 
3290*404b540aSrobert   choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
3291*404b540aSrobert 			     rgn_n_insns + 1);
3292*404b540aSrobert 
3293*404b540aSrobert   for (i = rgn_n_insns; n_new_insns--; i--)
3294*404b540aSrobert     choice_stack[i].state = xmalloc (dfa_state_size);
3295*404b540aSrobert }
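
/* Arithmetic sketch (ours): schedule_block starts with rgn_n_insns == -1
   and calls extend_ready (rgn_n_insns1 + 1), so for a block of 10 insns
   on a 4-issue target

       readyp->veclen = -1 + 11 + 1 + 4;    i.e. 15

   leaving a slot for every insn of the region plus one per issue
   slot.  */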
3296*404b540aSrobert 
3297*404b540aSrobert /* Extend global scheduler structures (those, that live across calls to
3298*404b540aSrobert    schedule_block) to include information about just emitted INSN.  */
3299*404b540aSrobert static void
3300*404b540aSrobert extend_global (rtx insn)
3301*404b540aSrobert {
3302*404b540aSrobert   gcc_assert (INSN_P (insn));
3303*404b540aSrobert   /* These structures have scheduler scope.  */
3304*404b540aSrobert   extend_h_i_d ();
3305*404b540aSrobert   init_h_i_d (insn);
3306*404b540aSrobert 
3307*404b540aSrobert   extend_dependency_caches (1, 0);
3308*404b540aSrobert }
3309*404b540aSrobert 
3310*404b540aSrobert /* Extends global and local scheduler structures to include information
3311*404b540aSrobert    about just emitted INSN.  */
3312*404b540aSrobert static void
3313*404b540aSrobert extend_all (rtx insn)
3314*404b540aSrobert {
3315*404b540aSrobert   extend_global (insn);
3316*404b540aSrobert 
3317*404b540aSrobert   /* These structures have block scope.  */
3318*404b540aSrobert   extend_ready (1);
3319*404b540aSrobert 
3320*404b540aSrobert   (*current_sched_info->add_remove_insn) (insn, 0);
3321*404b540aSrobert }
3322*404b540aSrobert 
3323*404b540aSrobert /* Initialize h_i_d entry of the new INSN with default values.
3324*404b540aSrobert    Values that are not explicitly initialized here hold zero.  */
3325*404b540aSrobert static void
3326*404b540aSrobert init_h_i_d (rtx insn)
3327*404b540aSrobert {
3328*404b540aSrobert   INSN_LUID (insn) = luid++;
3329*404b540aSrobert   INSN_COST (insn) = -1;
3330*404b540aSrobert   TODO_SPEC (insn) = HARD_DEP;
3331*404b540aSrobert   QUEUE_INDEX (insn) = QUEUE_NOWHERE;
3332*404b540aSrobert   INSN_TICK (insn) = INVALID_TICK;
3333*404b540aSrobert   INTER_TICK (insn) = INVALID_TICK;
3334*404b540aSrobert   find_insn_reg_weight1 (insn);
3335*404b540aSrobert }
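
/* Sketch (ours): a freshly emitted insn therefore starts out invisible
   to the scheduler,

       TODO_SPEC == HARD_DEP
       QUEUE_INDEX == QUEUE_NOWHERE
       INSN_TICK == INVALID_TICK

   until a later try_ready call moves it into the ready list or the
   queue.  */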
3336*404b540aSrobert 
3337*404b540aSrobert /* Generates recovery code for INSN.  */
3338*404b540aSrobert static void
3339*404b540aSrobert generate_recovery_code (rtx insn)
3340*404b540aSrobert {
3341*404b540aSrobert   if (TODO_SPEC (insn) & BEGIN_SPEC)
3342*404b540aSrobert     begin_speculative_block (insn);
3343*404b540aSrobert 
3344*404b540aSrobert   /* Here we have an insn with no dependencies on
3345*404b540aSrobert      instructions other than CHECK_SPEC ones.  */
3346*404b540aSrobert 
3347*404b540aSrobert   if (TODO_SPEC (insn) & BE_IN_SPEC)
3348*404b540aSrobert     add_to_speculative_block (insn);
3349*404b540aSrobert }
3350*404b540aSrobert 
3351*404b540aSrobert /* Helper function.
3352*404b540aSrobert    Try to add speculative dependencies of type FS between the instructions
3353*404b540aSrobert    in the LINK list and TWIN.  */
3354*404b540aSrobert static void
3355*404b540aSrobert process_insn_depend_be_in_spec (rtx link, rtx twin, ds_t fs)
3356*404b540aSrobert {
3357*404b540aSrobert   for (; link; link = XEXP (link, 1))
3358*404b540aSrobert     {
3359*404b540aSrobert       ds_t ds;
3360*404b540aSrobert       rtx consumer;
3361*404b540aSrobert 
3362*404b540aSrobert       consumer = XEXP (link, 0);
3363*404b540aSrobert 
3364*404b540aSrobert       ds = DEP_STATUS (link);
3365*404b540aSrobert 
3366*404b540aSrobert       if (/* If we want to create speculative dep.  */
3367*404b540aSrobert 	  fs
3368*404b540aSrobert 	  /* And we can do that because this is a true dep.  */
3369*404b540aSrobert 	  && (ds & DEP_TYPES) == DEP_TRUE)
3370*404b540aSrobert 	{
3371*404b540aSrobert 	  gcc_assert (!(ds & BE_IN_SPEC));
3372*404b540aSrobert 
3373*404b540aSrobert 	  if (/* If this dep can be overcome with 'begin speculation'.  */
3374*404b540aSrobert 	      ds & BEGIN_SPEC)
3375*404b540aSrobert 	    /* Then we have a choice: keep the dep 'begin speculative'
3376*404b540aSrobert 	       or transform it into 'be in speculative'.  */
3377*404b540aSrobert 	    {
3378*404b540aSrobert 	      if (/* In try_ready we assert that once an insn has become
3379*404b540aSrobert 		     ready it can be removed from the ready (or queue) list
3380*404b540aSrobert 		     only due to a backend decision.  Hence we can't let the
3381*404b540aSrobert 		     probability of the speculative dep decrease.  */
3382*404b540aSrobert 		  dep_weak (ds) <= dep_weak (fs))
3383*404b540aSrobert 		/* Transform it to be in speculative.  */
3384*404b540aSrobert 		ds = (ds & ~BEGIN_SPEC) | fs;
3385*404b540aSrobert 	    }
3386*404b540aSrobert 	  else
3387*404b540aSrobert 	    /* Mark the dep as 'be in speculative'.  */
3388*404b540aSrobert 	    ds |= fs;
3389*404b540aSrobert 	}
3390*404b540aSrobert 
3391*404b540aSrobert       add_back_forw_dep (consumer, twin, REG_NOTE_KIND (link), ds);
3392*404b540aSrobert     }
3393*404b540aSrobert }
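
/* Sketch (ours): the transformation above is a pure status rewrite.  For
   a true dep whose status DS carries BEGIN_DATA, calling this helper
   with FS == BE_IN_DATA and dep_weak (ds) <= dep_weak (fs) yields

       ds = (ds & ~BEGIN_SPEC) | BE_IN_DATA;

   before the dep is re-created against TWIN by add_back_forw_dep.  */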
3394*404b540aSrobert 
3395*404b540aSrobert /* Generates recovery code for BEGIN speculative INSN.  */
3396*404b540aSrobert static void
3397*404b540aSrobert begin_speculative_block (rtx insn)
3398*404b540aSrobert {
3399*404b540aSrobert   if (TODO_SPEC (insn) & BEGIN_DATA)
3400*404b540aSrobert     nr_begin_data++;
3401*404b540aSrobert   if (TODO_SPEC (insn) & BEGIN_CONTROL)
3402*404b540aSrobert     nr_begin_control++;
3403*404b540aSrobert 
3404*404b540aSrobert   create_check_block_twin (insn, false);
3405*404b540aSrobert 
3406*404b540aSrobert   TODO_SPEC (insn) &= ~BEGIN_SPEC;
3407*404b540aSrobert }
3408*404b540aSrobert 
3409*404b540aSrobert /* Generates recovery code for BE_IN speculative INSN.  */
3410*404b540aSrobert static void
3411*404b540aSrobert add_to_speculative_block (rtx insn)
3412*404b540aSrobert {
3413*404b540aSrobert   ds_t ts;
3414*404b540aSrobert   rtx link, twins = NULL;
3415*404b540aSrobert 
3416*404b540aSrobert   ts = TODO_SPEC (insn);
3417*404b540aSrobert   gcc_assert (!(ts & ~BE_IN_SPEC));
3418*404b540aSrobert 
3419*404b540aSrobert   if (ts & BE_IN_DATA)
3420*404b540aSrobert     nr_be_in_data++;
3421*404b540aSrobert   if (ts & BE_IN_CONTROL)
3422*404b540aSrobert     nr_be_in_control++;
3423*404b540aSrobert 
3424*404b540aSrobert   TODO_SPEC (insn) &= ~BE_IN_SPEC;
3425*404b540aSrobert   gcc_assert (!TODO_SPEC (insn));
3426*404b540aSrobert 
3427*404b540aSrobert   DONE_SPEC (insn) |= ts;
3428*404b540aSrobert 
3429*404b540aSrobert   /* First we convert all simple checks to branchy ones.  */
3430*404b540aSrobert   for (link = LOG_LINKS (insn); link;)
3431*404b540aSrobert     {
3432*404b540aSrobert       rtx check;
3433*404b540aSrobert 
3434*404b540aSrobert       check = XEXP (link, 0);
3435*404b540aSrobert 
3436*404b540aSrobert       if (IS_SPECULATION_SIMPLE_CHECK_P (check))
3437*404b540aSrobert 	{
3438*404b540aSrobert 	  create_check_block_twin (check, true);
3439*404b540aSrobert 	  link = LOG_LINKS (insn);
3440*404b540aSrobert 	}
3441*404b540aSrobert       else
3442*404b540aSrobert 	link = XEXP (link, 1);
3443*404b540aSrobert     }
3444*404b540aSrobert 
3445*404b540aSrobert   clear_priorities (insn);
3446*404b540aSrobert 
3447*404b540aSrobert   do
3448*404b540aSrobert     {
3449*404b540aSrobert       rtx link, check, twin;
3450*404b540aSrobert       basic_block rec;
3451*404b540aSrobert 
3452*404b540aSrobert       link = LOG_LINKS (insn);
3453*404b540aSrobert       gcc_assert (!(DEP_STATUS (link) & BEGIN_SPEC)
3454*404b540aSrobert 		  && (DEP_STATUS (link) & BE_IN_SPEC)
3455*404b540aSrobert 		  && (DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
3456*404b540aSrobert 
3457*404b540aSrobert       check = XEXP (link, 0);
3458*404b540aSrobert 
3459*404b540aSrobert       gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
3460*404b540aSrobert 		  && QUEUE_INDEX (check) == QUEUE_NOWHERE);
3461*404b540aSrobert 
3462*404b540aSrobert       rec = BLOCK_FOR_INSN (check);
3463*404b540aSrobert 
3464*404b540aSrobert       twin = emit_insn_before (copy_rtx (PATTERN (insn)), BB_END (rec));
3465*404b540aSrobert       extend_global (twin);
3466*404b540aSrobert 
3467*404b540aSrobert       RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
3468*404b540aSrobert 
3469*404b540aSrobert       if (sched_verbose && spec_info->dump)
3470*404b540aSrobert         /* INSN_BB (insn) isn't determined for twin insns yet.
3471*404b540aSrobert            So we can't use current_sched_info->print_insn.  */
3472*404b540aSrobert         fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3473*404b540aSrobert                  INSN_UID (twin), rec->index);
3474*404b540aSrobert 
3475*404b540aSrobert       twins = alloc_INSN_LIST (twin, twins);
3476*404b540aSrobert 
3477*404b540aSrobert       /* Add dependences between TWIN and all appropriate
3478*404b540aSrobert 	 instructions from REC.  */
3479*404b540aSrobert       do
3480*404b540aSrobert 	{
3481*404b540aSrobert 	  add_back_forw_dep (twin, check, REG_DEP_TRUE, DEP_TRUE);
3482*404b540aSrobert 
3483*404b540aSrobert 	  do
3484*404b540aSrobert 	    {
3485*404b540aSrobert 	      link = XEXP (link, 1);
3486*404b540aSrobert 	      if (link)
3487*404b540aSrobert 		{
3488*404b540aSrobert 		  check = XEXP (link, 0);
3489*404b540aSrobert 		  if (BLOCK_FOR_INSN (check) == rec)
3490*404b540aSrobert 		    break;
3491*404b540aSrobert 		}
3492*404b540aSrobert 	      else
3493*404b540aSrobert 		break;
3494*404b540aSrobert 	    }
3495*404b540aSrobert 	  while (1);
3496*404b540aSrobert 	}
3497*404b540aSrobert       while (link);
3498*404b540aSrobert 
3499*404b540aSrobert       process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, ts);
3500*404b540aSrobert 
3501*404b540aSrobert       for (link = LOG_LINKS (insn); link;)
3502*404b540aSrobert 	{
3503*404b540aSrobert 	  check = XEXP (link, 0);
3504*404b540aSrobert 
3505*404b540aSrobert 	  if (BLOCK_FOR_INSN (check) == rec)
3506*404b540aSrobert 	    {
3507*404b540aSrobert 	      delete_back_forw_dep (insn, check);
3508*404b540aSrobert 	      link = LOG_LINKS (insn);
3509*404b540aSrobert 	    }
3510*404b540aSrobert 	  else
3511*404b540aSrobert 	    link = XEXP (link, 1);
3512*404b540aSrobert 	}
3513*404b540aSrobert     }
3514*404b540aSrobert   while (LOG_LINKS (insn));
3515*404b540aSrobert 
3516*404b540aSrobert   /* We can't add the dependence between insn and twin earlier because
3517*404b540aSrobert      that would make twin appear in INSN_DEPEND (insn).  */
3518*404b540aSrobert   while (twins)
3519*404b540aSrobert     {
3520*404b540aSrobert       rtx twin;
3521*404b540aSrobert 
3522*404b540aSrobert       twin = XEXP (twins, 0);
3523*404b540aSrobert       calc_priorities (twin);
3524*404b540aSrobert       add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3525*404b540aSrobert 
3526*404b540aSrobert       twin = XEXP (twins, 1);
3527*404b540aSrobert       free_INSN_LIST_node (twins);
3528*404b540aSrobert       twins = twin;
3529*404b540aSrobert     }
3530*404b540aSrobert }
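
/* Resulting dependence shape (illustrative sketch, per recovery block REC
   that contained a branchy check fed by INSN):

     twin  --OUTPUT--> insn
     twin  --TRUE--> check            (for each check inside REC)
     consumer ~~BE_IN~~> twin         (via process_insn_depend_be_in_spec)  */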
3531*404b540aSrobert 
3532*404b540aSrobert /* Extends the array pointed to by P and fills the new part with zeros.  */
3533*404b540aSrobert void *
3534*404b540aSrobert xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
3535*404b540aSrobert {
3536*404b540aSrobert   gcc_assert (new_nmemb >= old_nmemb);
3537*404b540aSrobert   p = XRESIZEVAR (void, p, new_nmemb * size);
3538*404b540aSrobert   memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
3539*404b540aSrobert   return p;
3540*404b540aSrobert }
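
/* Usage sketch (hypothetical caller, for illustration only):

     v = xrecalloc (v, new_n, old_n, sizeof (*v));

   After the call, elements [old_n, new_n) of V read as zero; calling
   with new_nmemb < old_nmemb trips the assert.  */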
3541*404b540aSrobert 
3542*404b540aSrobert /* Return the probability of speculation success for the speculation
3543*404b540aSrobert    status DS.  */
3544*404b540aSrobert static dw_t
3545*404b540aSrobert dep_weak (ds_t ds)
3546*404b540aSrobert {
3547*404b540aSrobert   ds_t res = 1, dt;
3548*404b540aSrobert   int n = 0;
3549*404b540aSrobert 
3550*404b540aSrobert   dt = FIRST_SPEC_TYPE;
3551*404b540aSrobert   do
3552*404b540aSrobert     {
3553*404b540aSrobert       if (ds & dt)
3554*404b540aSrobert 	{
3555*404b540aSrobert 	  res *= (ds_t) get_dep_weak (ds, dt);
3556*404b540aSrobert 	  n++;
3557*404b540aSrobert 	}
3558*404b540aSrobert 
3559*404b540aSrobert       if (dt == LAST_SPEC_TYPE)
3560*404b540aSrobert 	break;
3561*404b540aSrobert       dt <<= SPEC_TYPE_SHIFT;
3562*404b540aSrobert     }
3563*404b540aSrobert   while (1);
3564*404b540aSrobert 
3565*404b540aSrobert   gcc_assert (n);
3566*404b540aSrobert   while (--n)
3567*404b540aSrobert     res /= MAX_DEP_WEAK;
3568*404b540aSrobert 
3569*404b540aSrobert   if (res < MIN_DEP_WEAK)
3570*404b540aSrobert     res = MIN_DEP_WEAK;
3571*404b540aSrobert 
3572*404b540aSrobert   gcc_assert (res <= MAX_DEP_WEAK);
3573*404b540aSrobert 
3574*404b540aSrobert   return (dw_t) res;
3575*404b540aSrobert }
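
/* Worked example (illustrative): weaknesses are fixed-point
   probabilities scaled by MAX_DEP_WEAK.  For two speculative types with
   weaknesses A and B, the loop and the division step compute

     res = A * B / MAX_DEP_WEAK

   so A == B == 3 * MAX_DEP_WEAK / 4 gives about 9 * MAX_DEP_WEAK / 16,
   clipped below at MIN_DEP_WEAK.  */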
3576*404b540aSrobert 
3577*404b540aSrobert /* Helper function.
3578*404b540aSrobert    Find the fallthru edge from PRED, scanning the shorter edge vector.  */
3579*404b540aSrobert static edge
3580*404b540aSrobert find_fallthru_edge (basic_block pred)
3581*404b540aSrobert {
3582*404b540aSrobert   edge e;
3583*404b540aSrobert   edge_iterator ei;
3584*404b540aSrobert   basic_block succ;
3585*404b540aSrobert 
3586*404b540aSrobert   succ = pred->next_bb;
3587*404b540aSrobert   gcc_assert (succ->prev_bb == pred);
3588*404b540aSrobert 
3589*404b540aSrobert   if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
3590*404b540aSrobert     {
3591*404b540aSrobert       FOR_EACH_EDGE (e, ei, pred->succs)
3592*404b540aSrobert 	if (e->flags & EDGE_FALLTHRU)
3593*404b540aSrobert 	  {
3594*404b540aSrobert 	    gcc_assert (e->dest == succ);
3595*404b540aSrobert 	    return e;
3596*404b540aSrobert 	  }
3597*404b540aSrobert     }
3598*404b540aSrobert   else
3599*404b540aSrobert     {
3600*404b540aSrobert       FOR_EACH_EDGE (e, ei, succ->preds)
3601*404b540aSrobert 	if (e->flags & EDGE_FALLTHRU)
3602*404b540aSrobert 	  {
3603*404b540aSrobert 	    gcc_assert (e->src == pred);
3604*404b540aSrobert 	    return e;
3605*404b540aSrobert 	  }
3606*404b540aSrobert     }
3607*404b540aSrobert 
3608*404b540aSrobert   return NULL;
3609*404b540aSrobert }
3610*404b540aSrobert 
3611*404b540aSrobert /* Initialize BEFORE_RECOVERY variable.  */
3612*404b540aSrobert static void
3613*404b540aSrobert init_before_recovery (void)
3614*404b540aSrobert {
3615*404b540aSrobert   basic_block last;
3616*404b540aSrobert   edge e;
3617*404b540aSrobert 
3618*404b540aSrobert   last = EXIT_BLOCK_PTR->prev_bb;
3619*404b540aSrobert   e = find_fallthru_edge (last);
3620*404b540aSrobert 
3621*404b540aSrobert   if (e)
3622*404b540aSrobert     {
3623*404b540aSrobert       /* We create two basic blocks:
3624*404b540aSrobert          1. A single-instruction block, inserted right after E->SRC,
3625*404b540aSrobert          which jumps to
3626*404b540aSrobert          2. An empty block placed right before EXIT_BLOCK.
3627*404b540aSrobert          Recovery blocks will be emitted between these two blocks.  */
3628*404b540aSrobert 
3629*404b540aSrobert       basic_block single, empty;
3630*404b540aSrobert       rtx x, label;
3631*404b540aSrobert 
3632*404b540aSrobert       single = create_empty_bb (last);
3633*404b540aSrobert       empty = create_empty_bb (single);
3634*404b540aSrobert 
3635*404b540aSrobert       single->count = last->count;
3636*404b540aSrobert       empty->count = last->count;
3637*404b540aSrobert       single->frequency = last->frequency;
3638*404b540aSrobert       empty->frequency = last->frequency;
3639*404b540aSrobert       BB_COPY_PARTITION (single, last);
3640*404b540aSrobert       BB_COPY_PARTITION (empty, last);
3641*404b540aSrobert 
3642*404b540aSrobert       redirect_edge_succ (e, single);
3643*404b540aSrobert       make_single_succ_edge (single, empty, 0);
3644*404b540aSrobert       make_single_succ_edge (empty, EXIT_BLOCK_PTR,
3645*404b540aSrobert 			     EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
3646*404b540aSrobert 
3647*404b540aSrobert       label = block_label (empty);
3648*404b540aSrobert       x = emit_jump_insn_after (gen_jump (label), BB_END (single));
3649*404b540aSrobert       JUMP_LABEL (x) = label;
3650*404b540aSrobert       LABEL_NUSES (label)++;
3651*404b540aSrobert       extend_global (x);
3652*404b540aSrobert 
3653*404b540aSrobert       emit_barrier_after (x);
3654*404b540aSrobert 
3655*404b540aSrobert       add_block (empty, 0);
3656*404b540aSrobert       add_block (single, 0);
3657*404b540aSrobert 
3658*404b540aSrobert       before_recovery = single;
3659*404b540aSrobert 
3660*404b540aSrobert       if (sched_verbose >= 2 && spec_info->dump)
3661*404b540aSrobert         fprintf (spec_info->dump,
3662*404b540aSrobert 		 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
3663*404b540aSrobert                  last->index, single->index, empty->index);
3664*404b540aSrobert     }
3665*404b540aSrobert   else
3666*404b540aSrobert     before_recovery = last;
3667*404b540aSrobert }
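
/* Resulting CFG sketch (illustrative):

     last --fallthru--> single --jump--> empty --fallthru--> EXIT

   Recovery blocks will later be placed between SINGLE and EMPTY.  */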
3668*404b540aSrobert 
3669*404b540aSrobert /* Returns new recovery block.  */
3670*404b540aSrobert static basic_block
3671*404b540aSrobert create_recovery_block (void)
3672*404b540aSrobert {
3673*404b540aSrobert   rtx label;
3674*404b540aSrobert   rtx barrier;
3675*404b540aSrobert   basic_block rec;
3676*404b540aSrobert 
3677*404b540aSrobert   added_recovery_block_p = true;
3678*404b540aSrobert 
3679*404b540aSrobert   if (!before_recovery)
3680*404b540aSrobert     init_before_recovery ();
3681*404b540aSrobert 
3682*404b540aSrobert   barrier = get_last_bb_insn (before_recovery);
3683*404b540aSrobert   gcc_assert (BARRIER_P (barrier));
3684*404b540aSrobert 
3685*404b540aSrobert   label = emit_label_after (gen_label_rtx (), barrier);
3686*404b540aSrobert 
3687*404b540aSrobert   rec = create_basic_block (label, label, before_recovery);
3688*404b540aSrobert 
3689*404b540aSrobert   /* A recovery block always ends with an unconditional jump.  */
3690*404b540aSrobert   emit_barrier_after (BB_END (rec));
3691*404b540aSrobert 
3692*404b540aSrobert   if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
3693*404b540aSrobert     BB_SET_PARTITION (rec, BB_COLD_PARTITION);
3694*404b540aSrobert 
3695*404b540aSrobert   if (sched_verbose && spec_info->dump)
3696*404b540aSrobert     fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
3697*404b540aSrobert              rec->index);
3698*404b540aSrobert 
3699*404b540aSrobert   before_recovery = rec;
3700*404b540aSrobert 
3701*404b540aSrobert   return rec;
3702*404b540aSrobert }
3703*404b540aSrobert 
3704*404b540aSrobert /* This function creates recovery code for INSN.  If MUTATE_P is nonzero,
3705*404b540aSrobert    INSN is a simple check that should be converted to a branchy one.  */
3706*404b540aSrobert static void
3707*404b540aSrobert create_check_block_twin (rtx insn, bool mutate_p)
3708*404b540aSrobert {
3709*404b540aSrobert   basic_block rec;
3710*404b540aSrobert   rtx label, check, twin, link;
3711*404b540aSrobert   ds_t fs;
3712*404b540aSrobert 
3713*404b540aSrobert   gcc_assert (ORIG_PAT (insn)
3714*404b540aSrobert 	      && (!mutate_p
3715*404b540aSrobert 		  || (IS_SPECULATION_SIMPLE_CHECK_P (insn)
3716*404b540aSrobert 		      && !(TODO_SPEC (insn) & SPECULATIVE))));
3717*404b540aSrobert 
3718*404b540aSrobert   /* Create recovery block.  */
3719*404b540aSrobert   if (mutate_p || targetm.sched.needs_block_p (insn))
3720*404b540aSrobert     {
3721*404b540aSrobert       rec = create_recovery_block ();
3722*404b540aSrobert       label = BB_HEAD (rec);
3723*404b540aSrobert     }
3724*404b540aSrobert   else
3725*404b540aSrobert     {
3726*404b540aSrobert       rec = EXIT_BLOCK_PTR;
3727*404b540aSrobert       label = 0;
3728*404b540aSrobert     }
3729*404b540aSrobert 
3730*404b540aSrobert   /* Emit CHECK.  */
3731*404b540aSrobert   check = targetm.sched.gen_check (insn, label, mutate_p);
3732*404b540aSrobert 
3733*404b540aSrobert   if (rec != EXIT_BLOCK_PTR)
3734*404b540aSrobert     {
3735*404b540aSrobert       /* To have mem_reg alive at the beginning of second_bb,
3736*404b540aSrobert 	 we emit the check BEFORE insn, so that after splitting,
3737*404b540aSrobert 	 insn will be at the beginning of second_bb, which will
3738*404b540aSrobert 	 provide us with the correct life information.  */
3739*404b540aSrobert       check = emit_jump_insn_before (check, insn);
3740*404b540aSrobert       JUMP_LABEL (check) = label;
3741*404b540aSrobert       LABEL_NUSES (label)++;
3742*404b540aSrobert     }
3743*404b540aSrobert   else
3744*404b540aSrobert     check = emit_insn_before (check, insn);
3745*404b540aSrobert 
3746*404b540aSrobert   /* Extend data structures.  */
3747*404b540aSrobert   extend_all (check);
3748*404b540aSrobert   RECOVERY_BLOCK (check) = rec;
3749*404b540aSrobert 
3750*404b540aSrobert   if (sched_verbose && spec_info->dump)
3751*404b540aSrobert     fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
3752*404b540aSrobert              (*current_sched_info->print_insn) (check, 0));
3753*404b540aSrobert 
3754*404b540aSrobert   gcc_assert (ORIG_PAT (insn));
3755*404b540aSrobert 
3756*404b540aSrobert   /* Initialize TWIN (twin is a duplicate of the original instruction
3757*404b540aSrobert      in the recovery block).  */
3758*404b540aSrobert   if (rec != EXIT_BLOCK_PTR)
3759*404b540aSrobert     {
3760*404b540aSrobert       rtx link;
3761*404b540aSrobert 
3762*404b540aSrobert       for (link = RESOLVED_DEPS (insn); link; link = XEXP (link, 1))
3763*404b540aSrobert 	if (DEP_STATUS (link) & DEP_OUTPUT)
3764*404b540aSrobert 	  {
3765*404b540aSrobert 	    RESOLVED_DEPS (check) =
3766*404b540aSrobert 	      alloc_DEPS_LIST (XEXP (link, 0), RESOLVED_DEPS (check), DEP_TRUE);
3767*404b540aSrobert 	    PUT_REG_NOTE_KIND (RESOLVED_DEPS (check), REG_DEP_TRUE);
3768*404b540aSrobert 	  }
3769*404b540aSrobert 
3770*404b540aSrobert       twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
3771*404b540aSrobert       extend_global (twin);
3772*404b540aSrobert 
3773*404b540aSrobert       if (sched_verbose && spec_info->dump)
3774*404b540aSrobert 	/* INSN_BB (insn) isn't determined for twin insns yet.
3775*404b540aSrobert 	   So we can't use current_sched_info->print_insn.  */
3776*404b540aSrobert 	fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3777*404b540aSrobert 		 INSN_UID (twin), rec->index);
3778*404b540aSrobert     }
3779*404b540aSrobert   else
3780*404b540aSrobert     {
3781*404b540aSrobert       ORIG_PAT (check) = ORIG_PAT (insn);
3782*404b540aSrobert       HAS_INTERNAL_DEP (check) = 1;
3783*404b540aSrobert       twin = check;
3784*404b540aSrobert       /* ??? We probably should change all OUTPUT dependencies to
3785*404b540aSrobert 	 (TRUE | OUTPUT).  */
3786*404b540aSrobert     }
3787*404b540aSrobert 
3788*404b540aSrobert   RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
3789*404b540aSrobert 
3790*404b540aSrobert   if (rec != EXIT_BLOCK_PTR)
3791*404b540aSrobert     /* In case of branchy check, fix CFG.  */
3792*404b540aSrobert     {
3793*404b540aSrobert       basic_block first_bb, second_bb;
3794*404b540aSrobert       rtx jump;
3795*404b540aSrobert       edge e;
3796*404b540aSrobert       int edge_flags;
3797*404b540aSrobert 
3798*404b540aSrobert       first_bb = BLOCK_FOR_INSN (check);
3799*404b540aSrobert       e = split_block (first_bb, check);
3800*404b540aSrobert       /* split_block emits a note if CHECK == BB_END.  Probably it
3801*404b540aSrobert 	 is better to rip that note off.  */
3802*404b540aSrobert       gcc_assert (e->src == first_bb);
3803*404b540aSrobert       second_bb = e->dest;
3804*404b540aSrobert 
3805*404b540aSrobert       /* This fixes the incoming edge.  */
3806*404b540aSrobert       /* ??? Which other flags should be specified?  */
3807*404b540aSrobert       if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
3808*404b540aSrobert 	/* The partition type is the same if it is "unpartitioned".  */
3809*404b540aSrobert 	edge_flags = EDGE_CROSSING;
3810*404b540aSrobert       else
3811*404b540aSrobert 	edge_flags = 0;
3812*404b540aSrobert 
3813*404b540aSrobert       e = make_edge (first_bb, rec, edge_flags);
3814*404b540aSrobert 
3815*404b540aSrobert       add_block (second_bb, first_bb);
3816*404b540aSrobert 
3817*404b540aSrobert       gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (second_bb)));
3818*404b540aSrobert       label = block_label (second_bb);
3819*404b540aSrobert       jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
3820*404b540aSrobert       JUMP_LABEL (jump) = label;
3821*404b540aSrobert       LABEL_NUSES (label)++;
3822*404b540aSrobert       extend_global (jump);
3823*404b540aSrobert 
3824*404b540aSrobert       if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
3825*404b540aSrobert 	/* The partition type is the same if it is "unpartitioned".  */
3826*404b540aSrobert 	{
3827*404b540aSrobert 	  /* Rewritten from cfgrtl.c.  */
3828*404b540aSrobert 	  if (flag_reorder_blocks_and_partition
3829*404b540aSrobert 	      && targetm.have_named_sections
3830*404b540aSrobert 	      /*&& !any_condjump_p (jump)*/)
3831*404b540aSrobert 	    /* any_condjump_p (jump) == false.
3832*404b540aSrobert 	       We don't need the same note for the check because
3833*404b540aSrobert 	       any_condjump_p (check) == true.  */
3834*404b540aSrobert 	    {
3835*404b540aSrobert 	      REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
3836*404b540aSrobert 						    NULL_RTX,
3837*404b540aSrobert 						    REG_NOTES (jump));
3838*404b540aSrobert 	    }
3839*404b540aSrobert 	  edge_flags = EDGE_CROSSING;
3840*404b540aSrobert 	}
3841*404b540aSrobert       else
3842*404b540aSrobert 	edge_flags = 0;
3843*404b540aSrobert 
3844*404b540aSrobert       make_single_succ_edge (rec, second_bb, edge_flags);
3845*404b540aSrobert 
3846*404b540aSrobert       add_block (rec, EXIT_BLOCK_PTR);
3847*404b540aSrobert     }
3848*404b540aSrobert 
3849*404b540aSrobert   /* Move backward dependences from INSN to CHECK and
3850*404b540aSrobert      move forward dependences from INSN to TWIN.  */
3851*404b540aSrobert   for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
3852*404b540aSrobert     {
3853*404b540aSrobert       ds_t ds;
3854*404b540aSrobert 
3855*404b540aSrobert       /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
3856*404b540aSrobert 	 check --TRUE--> producer  ??? or ANTI ???
3857*404b540aSrobert 	 twin  --TRUE--> producer
3858*404b540aSrobert 	 twin  --ANTI--> check
3859*404b540aSrobert 
3860*404b540aSrobert 	 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
3861*404b540aSrobert 	 check --ANTI--> producer
3862*404b540aSrobert 	 twin  --ANTI--> producer
3863*404b540aSrobert 	 twin  --ANTI--> check
3864*404b540aSrobert 
3865*404b540aSrobert 	 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
3866*404b540aSrobert 	 check ~~TRUE~~> producer
3867*404b540aSrobert 	 twin  ~~TRUE~~> producer
3868*404b540aSrobert 	 twin  --ANTI--> check  */
3869*404b540aSrobert 
3870*404b540aSrobert       ds = DEP_STATUS (link);
3871*404b540aSrobert 
3872*404b540aSrobert       if (ds & BEGIN_SPEC)
3873*404b540aSrobert 	{
3874*404b540aSrobert 	  gcc_assert (!mutate_p);
3875*404b540aSrobert 	  ds &= ~BEGIN_SPEC;
3876*404b540aSrobert 	}
3877*404b540aSrobert 
3878*404b540aSrobert       if (rec != EXIT_BLOCK_PTR)
3879*404b540aSrobert 	{
3880*404b540aSrobert 	  add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3881*404b540aSrobert 	  add_back_forw_dep (twin, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3882*404b540aSrobert 	}
3883*404b540aSrobert       else
3884*404b540aSrobert 	add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3885*404b540aSrobert     }
3886*404b540aSrobert 
3887*404b540aSrobert   for (link = LOG_LINKS (insn); link;)
3888*404b540aSrobert     if ((DEP_STATUS (link) & BEGIN_SPEC)
3889*404b540aSrobert 	|| mutate_p)
3890*404b540aSrobert       /* We can delete this dep only if we totally overcome it with
3891*404b540aSrobert 	 BEGIN_SPECULATION.  */
3892*404b540aSrobert       {
3893*404b540aSrobert         delete_back_forw_dep (insn, XEXP (link, 0));
3894*404b540aSrobert         link = LOG_LINKS (insn);
3895*404b540aSrobert       }
3896*404b540aSrobert     else
3897*404b540aSrobert       link = XEXP (link, 1);
3898*404b540aSrobert 
3899*404b540aSrobert   fs = 0;
3900*404b540aSrobert 
3901*404b540aSrobert   /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
3902*404b540aSrobert      here.  */
3903*404b540aSrobert 
3904*404b540aSrobert   gcc_assert (!DONE_SPEC (insn));
3905*404b540aSrobert 
3906*404b540aSrobert   if (!mutate_p)
3907*404b540aSrobert     {
3908*404b540aSrobert       ds_t ts = TODO_SPEC (insn);
3909*404b540aSrobert 
3910*404b540aSrobert       DONE_SPEC (insn) = ts & BEGIN_SPEC;
3911*404b540aSrobert       CHECK_SPEC (check) = ts & BEGIN_SPEC;
3912*404b540aSrobert 
3913*404b540aSrobert       if (ts & BEGIN_DATA)
3914*404b540aSrobert 	fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
3915*404b540aSrobert       if (ts & BEGIN_CONTROL)
3916*404b540aSrobert 	fs = set_dep_weak (fs, BE_IN_CONTROL, get_dep_weak (ts, BEGIN_CONTROL));
3917*404b540aSrobert     }
3918*404b540aSrobert   else
3919*404b540aSrobert     CHECK_SPEC (check) = CHECK_SPEC (insn);
3920*404b540aSrobert 
3921*404b540aSrobert   /* Future speculations: call the helper.  */
3922*404b540aSrobert   process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, fs);
3923*404b540aSrobert 
3924*404b540aSrobert   if (rec != EXIT_BLOCK_PTR)
3925*404b540aSrobert     {
3926*404b540aSrobert       /* Which types of dependencies to use here is, generally,
3927*404b540aSrobert 	 a machine-dependent question...  But, for now,
3928*404b540aSrobert 	 it is not.  */
3929*404b540aSrobert 
3930*404b540aSrobert       if (!mutate_p)
3931*404b540aSrobert 	{
3932*404b540aSrobert 	  add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE);
3933*404b540aSrobert 	  add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3934*404b540aSrobert 	}
3935*404b540aSrobert       else
3936*404b540aSrobert 	{
3937*404b540aSrobert 	  if (spec_info->dump)
3938*404b540aSrobert 	    fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
3939*404b540aSrobert 		     (*current_sched_info->print_insn) (insn, 0));
3940*404b540aSrobert 
3941*404b540aSrobert 	  for (link = INSN_DEPEND (insn); link; link = INSN_DEPEND (insn))
3942*404b540aSrobert 	    delete_back_forw_dep (XEXP (link, 0), insn);
3943*404b540aSrobert 
3944*404b540aSrobert 	  if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
3945*404b540aSrobert 	    try_ready (check);
3946*404b540aSrobert 
3947*404b540aSrobert 	  sched_remove_insn (insn);
3948*404b540aSrobert 	}
3949*404b540aSrobert 
3950*404b540aSrobert       add_back_forw_dep (twin, check, REG_DEP_ANTI, DEP_ANTI);
3951*404b540aSrobert     }
3952*404b540aSrobert   else
3953*404b540aSrobert     add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
3954*404b540aSrobert 
3955*404b540aSrobert   if (!mutate_p)
3956*404b540aSrobert     /* Fix priorities.  If MUTATE_P is nonzero, this is not necessary,
3957*404b540aSrobert        because it'll be done later in add_to_speculative_block.  */
3958*404b540aSrobert     {
3959*404b540aSrobert       clear_priorities (twin);
3960*404b540aSrobert       calc_priorities (twin);
3961*404b540aSrobert     }
3962*404b540aSrobert }
3963*404b540aSrobert 
3964*404b540aSrobert /* Removes dependencies between instructions in the recovery block REC
3965*404b540aSrobert    and the usual region instructions.  It keeps inner dependences, so it
3966*404b540aSrobert    won't be necessary to recompute them.  */
3967*404b540aSrobert static void
3968*404b540aSrobert fix_recovery_deps (basic_block rec)
3969*404b540aSrobert {
3970*404b540aSrobert   rtx note, insn, link, jump, ready_list = 0;
3971*404b540aSrobert   bitmap_head in_ready;
3972*404b540aSrobert 
3973*404b540aSrobert   bitmap_initialize (&in_ready, 0);
3974*404b540aSrobert 
3975*404b540aSrobert   /* NOTE - a basic block note.  */
3976*404b540aSrobert   note = NEXT_INSN (BB_HEAD (rec));
3977*404b540aSrobert   gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
3978*404b540aSrobert   insn = BB_END (rec);
3979*404b540aSrobert   gcc_assert (JUMP_P (insn));
3980*404b540aSrobert   insn = PREV_INSN (insn);
3981*404b540aSrobert 
3982*404b540aSrobert   do
3983*404b540aSrobert     {
3984*404b540aSrobert       for (link = INSN_DEPEND (insn); link;)
3985*404b540aSrobert 	{
3986*404b540aSrobert 	  rtx consumer;
3987*404b540aSrobert 
3988*404b540aSrobert 	  consumer = XEXP (link, 0);
3989*404b540aSrobert 
3990*404b540aSrobert 	  if (BLOCK_FOR_INSN (consumer) != rec)
3991*404b540aSrobert 	    {
3992*404b540aSrobert 	      delete_back_forw_dep (consumer, insn);
3993*404b540aSrobert 
3994*404b540aSrobert 	      if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
3995*404b540aSrobert 		{
3996*404b540aSrobert 		  ready_list = alloc_INSN_LIST (consumer, ready_list);
3997*404b540aSrobert 		  bitmap_set_bit (&in_ready, INSN_LUID (consumer));
3998*404b540aSrobert 		}
3999*404b540aSrobert 
4000*404b540aSrobert 	      link = INSN_DEPEND (insn);
4001*404b540aSrobert 	    }
4002*404b540aSrobert 	  else
4003*404b540aSrobert 	    {
4004*404b540aSrobert 	      gcc_assert ((DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
4005*404b540aSrobert 
4006*404b540aSrobert 	      link = XEXP (link, 1);
4007*404b540aSrobert 	    }
4008*404b540aSrobert 	}
4009*404b540aSrobert 
4010*404b540aSrobert       insn = PREV_INSN (insn);
4011*404b540aSrobert     }
4012*404b540aSrobert   while (insn != note);
4013*404b540aSrobert 
4014*404b540aSrobert   bitmap_clear (&in_ready);
4015*404b540aSrobert 
4016*404b540aSrobert   /* Try to add instructions to the ready or queue list.  */
4017*404b540aSrobert   for (link = ready_list; link; link = XEXP (link, 1))
4018*404b540aSrobert     try_ready (XEXP (link, 0));
4019*404b540aSrobert   free_INSN_LIST_list (&ready_list);
4020*404b540aSrobert 
4021*404b540aSrobert   /* Fix the jump's dependences.  */
4022*404b540aSrobert   insn = BB_HEAD (rec);
4023*404b540aSrobert   jump = BB_END (rec);
4024*404b540aSrobert 
4025*404b540aSrobert   gcc_assert (LABEL_P (insn));
4026*404b540aSrobert   insn = NEXT_INSN (insn);
4027*404b540aSrobert 
4028*404b540aSrobert   gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4029*404b540aSrobert   add_jump_dependencies (insn, jump);
4030*404b540aSrobert }
4031*404b540aSrobert 
4032*404b540aSrobert /* The function saves line notes at the beginning of block B.  */
4033*404b540aSrobert static void
4034*404b540aSrobert associate_line_notes_with_blocks (basic_block b)
4035*404b540aSrobert {
4036*404b540aSrobert   rtx line;
4037*404b540aSrobert 
4038*404b540aSrobert   for (line = BB_HEAD (b); line; line = PREV_INSN (line))
4039*404b540aSrobert     if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
4040*404b540aSrobert       {
4041*404b540aSrobert         line_note_head[b->index] = line;
4042*404b540aSrobert         break;
4043*404b540aSrobert       }
4044*404b540aSrobert   /* Do a forward search as well, since we won't get to see the first
4045*404b540aSrobert      notes in a basic block.  */
4046*404b540aSrobert   for (line = BB_HEAD (b); line; line = NEXT_INSN (line))
4047*404b540aSrobert     {
4048*404b540aSrobert       if (INSN_P (line))
4049*404b540aSrobert         break;
4050*404b540aSrobert       if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
4051*404b540aSrobert         line_note_head[b->index] = line;
4052*404b540aSrobert     }
4053*404b540aSrobert }
4054*404b540aSrobert 
4055*404b540aSrobert /* Changes pattern of the INSN to NEW_PAT.  */
4056*404b540aSrobert static void
4057*404b540aSrobert change_pattern (rtx insn, rtx new_pat)
4058*404b540aSrobert {
4059*404b540aSrobert   int t;
4060*404b540aSrobert 
4061*404b540aSrobert   t = validate_change (insn, &PATTERN (insn), new_pat, 0);
4062*404b540aSrobert   gcc_assert (t);
4063*404b540aSrobert   /* Invalidate INSN_COST, so it'll be recalculated.  */
4064*404b540aSrobert   INSN_COST (insn) = -1;
4065*404b540aSrobert   /* Invalidate INSN_TICK, so it'll be recalculated.  */
4066*404b540aSrobert   INSN_TICK (insn) = INVALID_TICK;
4067*404b540aSrobert   dfa_clear_single_insn_cache (insn);
4068*404b540aSrobert }
4069*404b540aSrobert 
4070*404b540aSrobert 
4071*404b540aSrobert /* Return -1 if INSN can't be speculated;
4072*404b540aSrobert    0 if, for speculation with REQUEST mode, it is OK to use the
4073*404b540aSrobert    current instruction pattern;
4074*404b540aSrobert    1 if the pattern must be changed to *NEW_PAT to be speculative.  */
4075*404b540aSrobert static int
4076*404b540aSrobert speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4077*404b540aSrobert {
4078*404b540aSrobert   gcc_assert (current_sched_info->flags & DO_SPECULATION
4079*404b540aSrobert               && (request & SPECULATIVE));
4080*404b540aSrobert 
4081*404b540aSrobert   if (!NONJUMP_INSN_P (insn)
4082*404b540aSrobert       || HAS_INTERNAL_DEP (insn)
4083*404b540aSrobert       || SCHED_GROUP_P (insn)
4084*404b540aSrobert       || side_effects_p (PATTERN (insn))
4085*404b540aSrobert       || (request & spec_info->mask) != request)
4086*404b540aSrobert     return -1;
4087*404b540aSrobert 
4088*404b540aSrobert   gcc_assert (!IS_SPECULATION_CHECK_P (insn));
4089*404b540aSrobert 
4090*404b540aSrobert   if (request & BE_IN_SPEC)
4091*404b540aSrobert     {
4092*404b540aSrobert       if (may_trap_p (PATTERN (insn)))
4093*404b540aSrobert         return -1;
4094*404b540aSrobert 
4095*404b540aSrobert       if (!(request & BEGIN_SPEC))
4096*404b540aSrobert         return 0;
4097*404b540aSrobert     }
4098*404b540aSrobert 
4099*404b540aSrobert   return targetm.sched.speculate_insn (insn, request & BEGIN_SPEC, new_pat);
4100*404b540aSrobert }
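
/* Caller sketch (hypothetical, for illustration only):

     rtx new_pat;
     int res = speculate_insn (insn, request, &new_pat);

     if (res == 1)
       change_pattern (insn, new_pat);  (switch to the speculative form)

   res == 0 means the current pattern already suits REQUEST, and
   res == -1 means INSN can't be speculated at all.  */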
4101*404b540aSrobert 
4102*404b540aSrobert /* Print some information about block BB, which starts with HEAD and
4103*404b540aSrobert    ends with TAIL, before scheduling it.
4104*404b540aSrobert    I is zero if the scheduler is about to start with a fresh ebb.  */
4105*404b540aSrobert static void
4106*404b540aSrobert dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4107*404b540aSrobert {
4108*404b540aSrobert   if (!i)
4109*404b540aSrobert     fprintf (sched_dump,
4110*404b540aSrobert 	     ";;   ======================================================\n");
4111*404b540aSrobert   else
4112*404b540aSrobert     fprintf (sched_dump,
4113*404b540aSrobert 	     ";;   =====================ADVANCING TO=====================\n");
4114*404b540aSrobert   fprintf (sched_dump,
4115*404b540aSrobert 	   ";;   -- basic block %d from %d to %d -- %s reload\n",
4116*404b540aSrobert 	   bb->index, INSN_UID (head), INSN_UID (tail),
4117*404b540aSrobert 	   (reload_completed ? "after" : "before"));
4118*404b540aSrobert   fprintf (sched_dump,
4119*404b540aSrobert 	   ";;   ======================================================\n");
4120*404b540aSrobert   fprintf (sched_dump, "\n");
4121*404b540aSrobert }
4122*404b540aSrobert 
4123*404b540aSrobert /* Unlink basic block notes and labels and save them, so they
4124*404b540aSrobert    can be easily restored.  We unlink basic block notes in the EBB to
4125*404b540aSrobert    provide back-compatibility with the previous code, as target backends
4126*404b540aSrobert    assume that there'll be only instructions between
4127*404b540aSrobert    current_sched_info->{head and tail}.  We restore these notes as soon
4128*404b540aSrobert    as we can.
4129*404b540aSrobert    FIRST (LAST) is the first (last) basic block in the ebb.
4130*404b540aSrobert    NB: In the usual case (FIRST == LAST) nothing is really done.  */
4131*404b540aSrobert void
4132*404b540aSrobert unlink_bb_notes (basic_block first, basic_block last)
4133*404b540aSrobert {
4134*404b540aSrobert   /* We DON'T unlink basic block notes of the first block in the ebb.  */
4135*404b540aSrobert   if (first == last)
4136*404b540aSrobert     return;
4137*404b540aSrobert 
4138*404b540aSrobert   bb_header = xmalloc (last_basic_block * sizeof (*bb_header));
4139*404b540aSrobert 
4140*404b540aSrobert   /* Make a sentinel.  */
4141*404b540aSrobert   if (last->next_bb != EXIT_BLOCK_PTR)
4142*404b540aSrobert     bb_header[last->next_bb->index] = 0;
4143*404b540aSrobert 
4144*404b540aSrobert   first = first->next_bb;
4145*404b540aSrobert   do
4146*404b540aSrobert     {
4147*404b540aSrobert       rtx prev, label, note, next;
4148*404b540aSrobert 
4149*404b540aSrobert       label = BB_HEAD (last);
4150*404b540aSrobert       if (LABEL_P (label))
4151*404b540aSrobert 	note = NEXT_INSN (label);
4152*404b540aSrobert       else
4153*404b540aSrobert 	note = label;
4154*404b540aSrobert       gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4155*404b540aSrobert 
4156*404b540aSrobert       prev = PREV_INSN (label);
4157*404b540aSrobert       next = NEXT_INSN (note);
4158*404b540aSrobert       gcc_assert (prev && next);
4159*404b540aSrobert 
4160*404b540aSrobert       NEXT_INSN (prev) = next;
4161*404b540aSrobert       PREV_INSN (next) = prev;
4162*404b540aSrobert 
4163*404b540aSrobert       bb_header[last->index] = label;
4164*404b540aSrobert 
4165*404b540aSrobert       if (last == first)
4166*404b540aSrobert 	break;
4167*404b540aSrobert 
4168*404b540aSrobert       last = last->prev_bb;
4169*404b540aSrobert     }
4170*404b540aSrobert   while (1);
4171*404b540aSrobert }
4172*404b540aSrobert 
4173*404b540aSrobert /* Restore basic block notes.
4174*404b540aSrobert    FIRST is the first basic block in the ebb.  */
4175*404b540aSrobert static void
4176*404b540aSrobert restore_bb_notes (basic_block first)
4177*404b540aSrobert {
4178*404b540aSrobert   if (!bb_header)
4179*404b540aSrobert     return;
4180*404b540aSrobert 
4181*404b540aSrobert   /* We DON'T unlink basic block notes of the first block in the ebb.  */
4182*404b540aSrobert   first = first->next_bb;
4183*404b540aSrobert   /* Remember: FIRST is actually the second basic block in the ebb.  */
4184*404b540aSrobert 
4185*404b540aSrobert   while (first != EXIT_BLOCK_PTR
4186*404b540aSrobert 	 && bb_header[first->index])
4187*404b540aSrobert     {
4188*404b540aSrobert       rtx prev, label, note, next;
4189*404b540aSrobert 
4190*404b540aSrobert       label = bb_header[first->index];
4191*404b540aSrobert       prev = PREV_INSN (label);
4192*404b540aSrobert       next = NEXT_INSN (prev);
4193*404b540aSrobert 
4194*404b540aSrobert       if (LABEL_P (label))
4195*404b540aSrobert 	note = NEXT_INSN (label);
4196*404b540aSrobert       else
4197*404b540aSrobert 	note = label;
4198*404b540aSrobert       gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4199*404b540aSrobert 
4200*404b540aSrobert       bb_header[first->index] = 0;
4201*404b540aSrobert 
4202*404b540aSrobert       NEXT_INSN (prev) = label;
4203*404b540aSrobert       NEXT_INSN (note) = next;
4204*404b540aSrobert       PREV_INSN (next) = note;
4205*404b540aSrobert 
4206*404b540aSrobert       first = first->next_bb;
4207*404b540aSrobert     }
4208*404b540aSrobert 
4209*404b540aSrobert   free (bb_header);
4210*404b540aSrobert   bb_header = 0;
4211*404b540aSrobert }
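
/* Pairing sketch (hypothetical driver, for illustration only):

     unlink_bb_notes (first_bb, last_bb);
     ... schedule insns between current_sched_info->head and tail ...
     restore_bb_notes (first_bb);

   restore_bb_notes is a no-op when bb_header was never allocated,
   i.e. when FIRST == LAST.  */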
4212*404b540aSrobert 
4213*404b540aSrobert /* Extend per basic block data structures of the scheduler.
4214*404b540aSrobert    If BB is NULL, initialize structures for the whole CFG.
4215*404b540aSrobert    Otherwise, initialize them for the just created BB.  */
4216*404b540aSrobert static void
4217*404b540aSrobert extend_bb (basic_block bb)
4218*404b540aSrobert {
4219*404b540aSrobert   rtx insn;
4220*404b540aSrobert 
4221*404b540aSrobert   if (write_symbols != NO_DEBUG)
4222*404b540aSrobert     {
4223*404b540aSrobert       /* Save-line-note-head:
4224*404b540aSrobert          Determine the line-number at the start of each basic block.
4225*404b540aSrobert          This must be computed and saved now, because after a basic block's
4226*404b540aSrobert          predecessor has been scheduled, it is impossible to accurately
4227*404b540aSrobert          determine the correct line number for the first insn of the block.  */
4228*404b540aSrobert       line_note_head = xrecalloc (line_note_head, last_basic_block,
4229*404b540aSrobert 				  old_last_basic_block,
4230*404b540aSrobert 				  sizeof (*line_note_head));
4231*404b540aSrobert 
4232*404b540aSrobert       if (bb)
4233*404b540aSrobert 	associate_line_notes_with_blocks (bb);
4234*404b540aSrobert       else
4235*404b540aSrobert 	FOR_EACH_BB (bb)
4236*404b540aSrobert 	  associate_line_notes_with_blocks (bb);
4237*404b540aSrobert     }
4238*404b540aSrobert 
4239*404b540aSrobert   old_last_basic_block = last_basic_block;
4240*404b540aSrobert 
4241*404b540aSrobert   if (current_sched_info->flags & USE_GLAT)
4242*404b540aSrobert     {
4243*404b540aSrobert       glat_start = xrealloc (glat_start,
4244*404b540aSrobert                              last_basic_block * sizeof (*glat_start));
4245*404b540aSrobert       glat_end = xrealloc (glat_end, last_basic_block * sizeof (*glat_end));
4246*404b540aSrobert     }
4247*404b540aSrobert 
4248*404b540aSrobert   /* The following is done to keep current_sched_info->next_tail non-null.  */
4249*404b540aSrobert 
4250*404b540aSrobert   insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4251*404b540aSrobert   if (NEXT_INSN (insn) == 0
4252*404b540aSrobert       || (!NOTE_P (insn)
4253*404b540aSrobert 	  && !LABEL_P (insn)
4254*404b540aSrobert 	  /* Don't emit a NOTE if it would end up before a BARRIER.  */
4255*404b540aSrobert 	  && !BARRIER_P (NEXT_INSN (insn))))
4256*404b540aSrobert     {
4257*404b540aSrobert       emit_note_after (NOTE_INSN_DELETED, insn);
4258*404b540aSrobert       /* Make insn appear outside the BB.  */
4259*404b540aSrobert       BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
4260*404b540aSrobert     }
4261*404b540aSrobert }
4262*404b540aSrobert 
4263*404b540aSrobert /* Add a basic block BB to extended basic block EBB.
4264*404b540aSrobert    If EBB is EXIT_BLOCK_PTR, then BB is a recovery block.
4265*404b540aSrobert    If EBB is NULL, then BB should be a new region.  */
4266*404b540aSrobert void
4267*404b540aSrobert add_block (basic_block bb, basic_block ebb)
4268*404b540aSrobert {
4269*404b540aSrobert   gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO
4270*404b540aSrobert 	      && bb->il.rtl->global_live_at_start == 0
4271*404b540aSrobert 	      && bb->il.rtl->global_live_at_end == 0);
4272*404b540aSrobert 
4273*404b540aSrobert   extend_bb (bb);
4274*404b540aSrobert 
4275*404b540aSrobert   glat_start[bb->index] = 0;
4276*404b540aSrobert   glat_end[bb->index] = 0;
4277*404b540aSrobert 
4278*404b540aSrobert   if (current_sched_info->add_block)
4279*404b540aSrobert     /* This changes only data structures of the front-end.  */
4280*404b540aSrobert     current_sched_info->add_block (bb, ebb);
4281*404b540aSrobert }
4282*404b540aSrobert 
4283*404b540aSrobert /* Helper function.
4284*404b540aSrobert    Fix CFG after both in- and inter-block movement of
4285*404b540aSrobert    control_flow_insn_p JUMP.  */
4286*404b540aSrobert static void
4287*404b540aSrobert fix_jump_move (rtx jump)
4288*404b540aSrobert {
4289*404b540aSrobert   basic_block bb, jump_bb, jump_bb_next;
4290*404b540aSrobert 
4291*404b540aSrobert   bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4292*404b540aSrobert   jump_bb = BLOCK_FOR_INSN (jump);
4293*404b540aSrobert   jump_bb_next = jump_bb->next_bb;
4294*404b540aSrobert 
4295*404b540aSrobert   gcc_assert (current_sched_info->flags & SCHED_EBB
4296*404b540aSrobert 	      || IS_SPECULATION_BRANCHY_CHECK_P (jump));
4297*404b540aSrobert 
4298*404b540aSrobert   if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
4299*404b540aSrobert     /* if jump_bb_next is not empty.  */
4300*404b540aSrobert     BB_END (jump_bb) = BB_END (jump_bb_next);
4301*404b540aSrobert 
4302*404b540aSrobert   if (BB_END (bb) != PREV_INSN (jump))
4303*404b540aSrobert     /* Then there are instructions after jump that should be placed
4304*404b540aSrobert        in jump_bb_next.  */
4305*404b540aSrobert     BB_END (jump_bb_next) = BB_END (bb);
4306*404b540aSrobert   else
4307*404b540aSrobert     /* Otherwise jump_bb_next is empty.  */
4308*404b540aSrobert     BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
4309*404b540aSrobert 
4310*404b540aSrobert   /* To make the assertion in move_insn happy.  */
4311*404b540aSrobert   BB_END (bb) = PREV_INSN (jump);
4312*404b540aSrobert 
4313*404b540aSrobert   update_bb_for_insn (jump_bb_next);
4314*404b540aSrobert }
4315*404b540aSrobert 
4316*404b540aSrobert /* Fix CFG after interblock movement of control_flow_insn_p JUMP.  */
4317*404b540aSrobert static void
4318*404b540aSrobert move_block_after_check (rtx jump)
4319*404b540aSrobert {
4320*404b540aSrobert   basic_block bb, jump_bb, jump_bb_next;
4321*404b540aSrobert   VEC(edge,gc) *t;
4322*404b540aSrobert 
4323*404b540aSrobert   bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4324*404b540aSrobert   jump_bb = BLOCK_FOR_INSN (jump);
4325*404b540aSrobert   jump_bb_next = jump_bb->next_bb;
4326*404b540aSrobert 
4327*404b540aSrobert   update_bb_for_insn (jump_bb);
4328*404b540aSrobert 
4329*404b540aSrobert   gcc_assert (IS_SPECULATION_CHECK_P (jump)
4330*404b540aSrobert 	      || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
4331*404b540aSrobert 
4332*404b540aSrobert   unlink_block (jump_bb_next);
4333*404b540aSrobert   link_block (jump_bb_next, bb);
4334*404b540aSrobert 
4335*404b540aSrobert   t = bb->succs;
4336*404b540aSrobert   bb->succs = 0;
4337*404b540aSrobert   move_succs (&(jump_bb->succs), bb);
4338*404b540aSrobert   move_succs (&(jump_bb_next->succs), jump_bb);
4339*404b540aSrobert   move_succs (&t, jump_bb_next);
4340*404b540aSrobert 
4341*404b540aSrobert   if (current_sched_info->fix_recovery_cfg)
4342*404b540aSrobert     current_sched_info->fix_recovery_cfg
4343*404b540aSrobert       (bb->index, jump_bb->index, jump_bb_next->index);
4344*404b540aSrobert }
4345*404b540aSrobert 
4346*404b540aSrobert /* Helper function for move_block_after_check.
4347*404b540aSrobert    This function attaches the edge vector pointed to by SUCCSP to
4348*404b540aSrobert    block TO.  */
4349*404b540aSrobert static void
4350*404b540aSrobert move_succs (VEC(edge,gc) **succsp, basic_block to)
4351*404b540aSrobert {
4352*404b540aSrobert   edge e;
4353*404b540aSrobert   edge_iterator ei;
4354*404b540aSrobert 
4355*404b540aSrobert   gcc_assert (to->succs == 0);
4356*404b540aSrobert 
4357*404b540aSrobert   to->succs = *succsp;
4358*404b540aSrobert 
4359*404b540aSrobert   FOR_EACH_EDGE (e, ei, to->succs)
4360*404b540aSrobert     e->src = to;
4361*404b540aSrobert 
4362*404b540aSrobert   *succsp = 0;
4363*404b540aSrobert }
4364*404b540aSrobert 
4365*404b540aSrobert /* Initialize GLAT (global_live_at_{start, end}) structures.
4366*404b540aSrobert    GLAT structures are used to substitute the global_live_at_{start, end}
4367*404b540aSrobert    regsets during scheduling.  This is necessary to use such functions as
4368*404b540aSrobert    split_block (), as they assume consistency of register live information.  */
4369*404b540aSrobert static void
4370*404b540aSrobert init_glat (void)
4371*404b540aSrobert {
4372*404b540aSrobert   basic_block bb;
4373*404b540aSrobert 
4374*404b540aSrobert   FOR_ALL_BB (bb)
4375*404b540aSrobert     init_glat1 (bb);
4376*404b540aSrobert }
4377*404b540aSrobert 
4378*404b540aSrobert /* Helper function for init_glat.  */
4379*404b540aSrobert static void
4380*404b540aSrobert init_glat1 (basic_block bb)
4381*404b540aSrobert {
4382*404b540aSrobert   gcc_assert (bb->il.rtl->global_live_at_start != 0
4383*404b540aSrobert 	      && bb->il.rtl->global_live_at_end != 0);
4384*404b540aSrobert 
4385*404b540aSrobert   glat_start[bb->index] = bb->il.rtl->global_live_at_start;
4386*404b540aSrobert   glat_end[bb->index] = bb->il.rtl->global_live_at_end;
4387*404b540aSrobert 
4388*404b540aSrobert   if (current_sched_info->flags & DETACH_LIFE_INFO)
4389*404b540aSrobert     {
4390*404b540aSrobert       bb->il.rtl->global_live_at_start = 0;
4391*404b540aSrobert       bb->il.rtl->global_live_at_end = 0;
4392*404b540aSrobert     }
4393*404b540aSrobert }
4394*404b540aSrobert 
4395*404b540aSrobert /* Attach reg_live_info back to basic blocks.
4396*404b540aSrobert    Also save regsets that should not have been changed during scheduling,
4397*404b540aSrobert    for checking purposes (see check_reg_live).  */
4398*404b540aSrobert void
4399*404b540aSrobert attach_life_info (void)
4400*404b540aSrobert {
4401*404b540aSrobert   basic_block bb;
4402*404b540aSrobert 
4403*404b540aSrobert   FOR_ALL_BB (bb)
4404*404b540aSrobert     attach_life_info1 (bb);
4405*404b540aSrobert }
4406*404b540aSrobert 
4407*404b540aSrobert /* Helper function for attach_life_info.  */
4408*404b540aSrobert static void
4409*404b540aSrobert attach_life_info1 (basic_block bb)
4410*404b540aSrobert {
4411*404b540aSrobert   gcc_assert (bb->il.rtl->global_live_at_start == 0
4412*404b540aSrobert 	      && bb->il.rtl->global_live_at_end == 0);
4413*404b540aSrobert 
4414*404b540aSrobert   if (glat_start[bb->index])
4415*404b540aSrobert     {
4416*404b540aSrobert       gcc_assert (glat_end[bb->index]);
4417*404b540aSrobert 
4418*404b540aSrobert       bb->il.rtl->global_live_at_start = glat_start[bb->index];
4419*404b540aSrobert       bb->il.rtl->global_live_at_end = glat_end[bb->index];
4420*404b540aSrobert 
4421*404b540aSrobert       /* Make them NULL, so they won't be freed in free_glat.  */
4422*404b540aSrobert       glat_start[bb->index] = 0;
4423*404b540aSrobert       glat_end[bb->index] = 0;
4424*404b540aSrobert 
4425*404b540aSrobert #ifdef ENABLE_CHECKING
4426*404b540aSrobert       if (bb->index < NUM_FIXED_BLOCKS
4427*404b540aSrobert 	  || current_sched_info->region_head_or_leaf_p (bb, 0))
4428*404b540aSrobert 	{
4429*404b540aSrobert 	  glat_start[bb->index] = ALLOC_REG_SET (&reg_obstack);
4430*404b540aSrobert 	  COPY_REG_SET (glat_start[bb->index],
4431*404b540aSrobert 			bb->il.rtl->global_live_at_start);
4432*404b540aSrobert 	}
4433*404b540aSrobert 
4434*404b540aSrobert       if (bb->index < NUM_FIXED_BLOCKS
4435*404b540aSrobert 	  || current_sched_info->region_head_or_leaf_p (bb, 1))
4436*404b540aSrobert 	{
4437*404b540aSrobert 	  glat_end[bb->index] = ALLOC_REG_SET (&reg_obstack);
4438*404b540aSrobert 	  COPY_REG_SET (glat_end[bb->index], bb->il.rtl->global_live_at_end);
4439*404b540aSrobert 	}
4440*404b540aSrobert #endif
4441*404b540aSrobert     }
4442*404b540aSrobert   else
4443*404b540aSrobert     {
4444*404b540aSrobert       gcc_assert (!glat_end[bb->index]);
4445*404b540aSrobert 
4446*404b540aSrobert       bb->il.rtl->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
4447*404b540aSrobert       bb->il.rtl->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
4448*404b540aSrobert     }
4449*404b540aSrobert }
4450*404b540aSrobert 
4451*404b540aSrobert /* Free GLAT information.  */
4452*404b540aSrobert static void
4453*404b540aSrobert free_glat (void)
4454*404b540aSrobert {
4455*404b540aSrobert #ifdef ENABLE_CHECKING
4456*404b540aSrobert   if (current_sched_info->flags & DETACH_LIFE_INFO)
4457*404b540aSrobert     {
4458*404b540aSrobert       basic_block bb;
4459*404b540aSrobert 
4460*404b540aSrobert       FOR_ALL_BB (bb)
4461*404b540aSrobert 	{
4462*404b540aSrobert 	  if (glat_start[bb->index])
4463*404b540aSrobert 	    FREE_REG_SET (glat_start[bb->index]);
4464*404b540aSrobert 	  if (glat_end[bb->index])
4465*404b540aSrobert 	    FREE_REG_SET (glat_end[bb->index]);
4466*404b540aSrobert 	}
4467*404b540aSrobert     }
4468*404b540aSrobert #endif
4469*404b540aSrobert 
4470*404b540aSrobert   free (glat_start);
4471*404b540aSrobert   free (glat_end);
4472*404b540aSrobert }
4473*404b540aSrobert 
4474*404b540aSrobert /* Remove INSN from the instruction stream.
4475*404b540aSrobert    INSN should not have any dependencies.  */
4476*404b540aSrobert static void
4477*404b540aSrobert sched_remove_insn (rtx insn)
4478*404b540aSrobert {
4479*404b540aSrobert   change_queue_index (insn, QUEUE_NOWHERE);
4480*404b540aSrobert   current_sched_info->add_remove_insn (insn, 1);
4481*404b540aSrobert   remove_insn (insn);
4482*404b540aSrobert }
4483*404b540aSrobert 
4484*404b540aSrobert /* Clear the priorities of all instructions that are
4485*404b540aSrobert    forward dependent on INSN.  */
4486*404b540aSrobert static void
4487*404b540aSrobert clear_priorities (rtx insn)
4488*404b540aSrobert {
4489*404b540aSrobert   rtx link;
4490*404b540aSrobert 
4491*404b540aSrobert   for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
4492*404b540aSrobert     {
4493*404b540aSrobert       rtx pro;
4494*404b540aSrobert 
4495*404b540aSrobert       pro = XEXP (link, 0);
4496*404b540aSrobert       if (INSN_PRIORITY_KNOWN (pro))
4497*404b540aSrobert 	{
4498*404b540aSrobert 	  INSN_PRIORITY_KNOWN (pro) = 0;
4499*404b540aSrobert 	  clear_priorities (pro);
4500*404b540aSrobert 	}
4501*404b540aSrobert     }
4502*404b540aSrobert }
4503*404b540aSrobert 
4504*404b540aSrobert /* Recompute the priorities of instructions whose priorities might have
4505*404b540aSrobert    been changed due to changes in INSN.  */
4506*404b540aSrobert static void
4507*404b540aSrobert calc_priorities (rtx insn)
4508*404b540aSrobert {
4509*404b540aSrobert   rtx link;
4510*404b540aSrobert 
4511*404b540aSrobert   for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
4512*404b540aSrobert     {
4513*404b540aSrobert       rtx pro;
4514*404b540aSrobert 
4515*404b540aSrobert       pro = XEXP (link, 0);
4516*404b540aSrobert       if (!INSN_PRIORITY_KNOWN (pro))
4517*404b540aSrobert 	{
4518*404b540aSrobert 	  priority (pro);
4519*404b540aSrobert 	  calc_priorities (pro);
4520*404b540aSrobert 	}
4521*404b540aSrobert     }
4522*404b540aSrobert }
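
/* Pairing sketch: the two walks are used together, as in
   create_check_block_twin above:

     clear_priorities (twin);   (invalidate priorities up the dep graph)
     calc_priorities (twin);    (recompute them)  */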
4523*404b540aSrobert 
4524*404b540aSrobert 
4525*404b540aSrobert /* Add dependences between JUMP and other instructions in the recovery
4526*404b540aSrobert    block.  INSN is the first insn in the recovery block.  */
4527*404b540aSrobert static void
4528*404b540aSrobert add_jump_dependencies (rtx insn, rtx jump)
4529*404b540aSrobert {
4530*404b540aSrobert   do
4531*404b540aSrobert     {
4532*404b540aSrobert       insn = NEXT_INSN (insn);
4533*404b540aSrobert       if (insn == jump)
4534*404b540aSrobert 	break;
4535*404b540aSrobert 
4536*404b540aSrobert       if (!INSN_DEPEND (insn))
4537*404b540aSrobert 	add_back_forw_dep (jump, insn, REG_DEP_ANTI, DEP_ANTI);
4538*404b540aSrobert     }
4539*404b540aSrobert   while (1);
4540*404b540aSrobert   gcc_assert (LOG_LINKS (jump));
4541*404b540aSrobert }
4542*404b540aSrobert 
4543*404b540aSrobert /* Return the NOTE_INSN_BASIC_BLOCK of BB.  */
4544*404b540aSrobert rtx
4545*404b540aSrobert bb_note (basic_block bb)
4546*404b540aSrobert {
4547*404b540aSrobert   rtx note;
4548*404b540aSrobert 
4549*404b540aSrobert   note = BB_HEAD (bb);
4550*404b540aSrobert   if (LABEL_P (note))
4551*404b540aSrobert     note = NEXT_INSN (note);
4552*404b540aSrobert 
4553*404b540aSrobert   gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4554*404b540aSrobert   return note;
4555*404b540aSrobert }
4556*404b540aSrobert 
4557*404b540aSrobert #ifdef ENABLE_CHECKING
4558*404b540aSrobert extern void debug_spec_status (ds_t);
4559*404b540aSrobert 
4560*404b540aSrobert /* Dump information about the dependence status S.  */
4561*404b540aSrobert void
4562*404b540aSrobert debug_spec_status (ds_t s)
4563*404b540aSrobert {
4564*404b540aSrobert   FILE *f = stderr;
4565*404b540aSrobert 
4566*404b540aSrobert   if (s & BEGIN_DATA)
4567*404b540aSrobert     fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak (s, BEGIN_DATA));
4568*404b540aSrobert   if (s & BE_IN_DATA)
4569*404b540aSrobert     fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak (s, BE_IN_DATA));
4570*404b540aSrobert   if (s & BEGIN_CONTROL)
4571*404b540aSrobert     fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak (s, BEGIN_CONTROL));
4572*404b540aSrobert   if (s & BE_IN_CONTROL)
4573*404b540aSrobert     fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak (s, BE_IN_CONTROL));
4574*404b540aSrobert 
4575*404b540aSrobert   if (s & HARD_DEP)
4576*404b540aSrobert     fprintf (f, "HARD_DEP; ");
4577*404b540aSrobert 
4578*404b540aSrobert   if (s & DEP_TRUE)
4579*404b540aSrobert     fprintf (f, "DEP_TRUE; ");
4580*404b540aSrobert   if (s & DEP_ANTI)
4581*404b540aSrobert     fprintf (f, "DEP_ANTI; ");
4582*404b540aSrobert   if (s & DEP_OUTPUT)
4583*404b540aSrobert     fprintf (f, "DEP_OUTPUT; ");
4584*404b540aSrobert 
4585*404b540aSrobert   fprintf (f, "\n");
4586*404b540aSrobert }
4587*404b540aSrobert 
4588*404b540aSrobert /* Helper function for check_cfg.
4589*404b540aSrobert    Return nonzero if the edge vector pointed to by EL has an edge with
4590*404b540aSrobert    TYPE in its flags.  */
4591*404b540aSrobert static int
4592*404b540aSrobert has_edge_p (VEC(edge,gc) *el, int type)
4593*404b540aSrobert {
4594*404b540aSrobert   edge e;
4595*404b540aSrobert   edge_iterator ei;
4596*404b540aSrobert 
4597*404b540aSrobert   FOR_EACH_EDGE (e, ei, el)
4598*404b540aSrobert     if (e->flags & type)
4599*404b540aSrobert       return 1;
4600*404b540aSrobert   return 0;
4601*404b540aSrobert }
4602*404b540aSrobert 
4603*404b540aSrobert /* Check a few properties of the CFG between HEAD and TAIL.
4604*404b540aSrobert    If HEAD (TAIL) is NULL, check from the beginning (till the end) of the
4605*404b540aSrobert    instruction stream.  */
4606*404b540aSrobert static void
4607*404b540aSrobert check_cfg (rtx head, rtx tail)
4608*404b540aSrobert {
4609*404b540aSrobert   rtx next_tail;
4610*404b540aSrobert   basic_block bb = 0;
4611*404b540aSrobert   int not_first = 0, not_last;
4612*404b540aSrobert 
4613*404b540aSrobert   if (head == NULL)
4614*404b540aSrobert     head = get_insns ();
4615*404b540aSrobert   if (tail == NULL)
4616*404b540aSrobert     tail = get_last_insn ();
4617*404b540aSrobert   next_tail = NEXT_INSN (tail);
4618*404b540aSrobert 
4619*404b540aSrobert   do
4620*404b540aSrobert     {
4621*404b540aSrobert       not_last = head != tail;
4622*404b540aSrobert 
4623*404b540aSrobert       if (not_first)
4624*404b540aSrobert 	gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
4625*404b540aSrobert       if (not_last)
4626*404b540aSrobert 	gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
4627*404b540aSrobert 
4628*404b540aSrobert       if (LABEL_P (head)
4629*404b540aSrobert 	  || (NOTE_INSN_BASIC_BLOCK_P (head)
4630*404b540aSrobert 	      && (!not_first
4631*404b540aSrobert 		  || (not_first && !LABEL_P (PREV_INSN (head))))))
4632*404b540aSrobert 	{
4633*404b540aSrobert 	  gcc_assert (bb == 0);
4634*404b540aSrobert 	  bb = BLOCK_FOR_INSN (head);
4635*404b540aSrobert 	  if (bb != 0)
4636*404b540aSrobert 	    gcc_assert (BB_HEAD (bb) == head);
4637*404b540aSrobert 	  else
4638*404b540aSrobert 	    /* This is the case of jump table.  See inside_basic_block_p ().  */
4639*404b540aSrobert 	    gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
4640*404b540aSrobert 	}
4641*404b540aSrobert 
4642*404b540aSrobert       if (bb == 0)
4643*404b540aSrobert 	{
4644*404b540aSrobert 	  gcc_assert (!inside_basic_block_p (head));
4645*404b540aSrobert 	  head = NEXT_INSN (head);
4646*404b540aSrobert 	}
4647*404b540aSrobert       else
4648*404b540aSrobert 	{
4649*404b540aSrobert 	  gcc_assert (inside_basic_block_p (head)
4650*404b540aSrobert 		      || NOTE_P (head));
4651*404b540aSrobert 	  gcc_assert (BLOCK_FOR_INSN (head) == bb);
4652*404b540aSrobert 
4653*404b540aSrobert 	  if (LABEL_P (head))
4654*404b540aSrobert 	    {
4655*404b540aSrobert 	      head = NEXT_INSN (head);
4656*404b540aSrobert 	      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
4657*404b540aSrobert 	    }
4658*404b540aSrobert 	  else
4659*404b540aSrobert 	    {
4660*404b540aSrobert 	      if (control_flow_insn_p (head))
4661*404b540aSrobert 		{
4662*404b540aSrobert 		  gcc_assert (BB_END (bb) == head);
4663*404b540aSrobert 
4664*404b540aSrobert 		  if (any_uncondjump_p (head))
4665*404b540aSrobert 		    gcc_assert (EDGE_COUNT (bb->succs) == 1
4666*404b540aSrobert 				&& BARRIER_P (NEXT_INSN (head)));
4667*404b540aSrobert 		  else if (any_condjump_p (head))
4668*404b540aSrobert 		    gcc_assert (/* Usual case.  */
4669*404b540aSrobert                                 (EDGE_COUNT (bb->succs) > 1
4670*404b540aSrobert                                  && !BARRIER_P (NEXT_INSN (head)))
4671*404b540aSrobert                                 /* Or jump to the next instruction.  */
4672*404b540aSrobert                                 || (EDGE_COUNT (bb->succs) == 1
4673*404b540aSrobert                                     && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
4674*404b540aSrobert                                         == JUMP_LABEL (head))));
4675*404b540aSrobert 		}
4676*404b540aSrobert 	      if (BB_END (bb) == head)
4677*404b540aSrobert 		{
4678*404b540aSrobert 		  if (EDGE_COUNT (bb->succs) > 1)
4679*404b540aSrobert 		    gcc_assert (control_flow_insn_p (head)
4680*404b540aSrobert 				|| has_edge_p (bb->succs, EDGE_COMPLEX));
4681*404b540aSrobert 		  bb = 0;
4682*404b540aSrobert 		}
4683*404b540aSrobert 
4684*404b540aSrobert 	      head = NEXT_INSN (head);
4685*404b540aSrobert 	    }
4686*404b540aSrobert 	}
4687*404b540aSrobert 
4688*404b540aSrobert       not_first = 1;
4689*404b540aSrobert     }
4690*404b540aSrobert   while (head != next_tail);
4691*404b540aSrobert 
4692*404b540aSrobert   gcc_assert (bb == 0);
4693*404b540aSrobert }
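
/* As best as can be told from the rest of this file, check_cfg is
   normally invoked as check_cfg (0, 0) when scheduling starts and
   finishes, i.e. it walks the entire insn stream rather than a single
   region.  */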

/* Perform a few consistency checks of flags in different data structures.  */
static void
check_sched_flags (void)
{
  unsigned int f = current_sched_info->flags;

  if (flag_sched_stalled_insns)
    gcc_assert (!(f & DO_SPECULATION));
  if (f & DO_SPECULATION)
    gcc_assert (!flag_sched_stalled_insns
                && (f & DETACH_LIFE_INFO)
                && spec_info
                && spec_info->mask);
  if (f & DETACH_LIFE_INFO)
    gcc_assert (f & USE_GLAT);
}
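
/* Put differently, the asserts above enforce a chain of requirements:
   DO_SPECULATION implies DETACH_LIFE_INFO (plus a populated spec_info),
   and DETACH_LIFE_INFO in turn implies USE_GLAT; speculative scheduling
   is also mutually exclusive with -fsched-stalled-insns.  */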

/* Check the global_live_at_{start, end} regsets against the cached
   glat_start and glat_end copies.
   If FATAL_P is true, abort execution at the first failure.
   Otherwise, print diagnostics to stderr (this mode is for calling
   from the debugger).  */
void
check_reg_live (bool fatal_p)
{
  basic_block bb;

  FOR_ALL_BB (bb)
    {
      int i;

      i = bb->index;

      if (glat_start[i])
        {
          bool b = bitmap_equal_p (bb->il.rtl->global_live_at_start,
                                   glat_start[i]);

          if (!b)
            {
              gcc_assert (!fatal_p);

              fprintf (stderr, ";; check_reg_live_at_start (%d) failed.\n", i);
            }
        }

      if (glat_end[i])
        {
          bool b = bitmap_equal_p (bb->il.rtl->global_live_at_end,
                                   glat_end[i]);

          if (!b)
            {
              gcc_assert (!fatal_p);

              fprintf (stderr, ";; check_reg_live_at_end (%d) failed.\n", i);
            }
        }
    }
}
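
/* For example, calling check_reg_live (false) from gdb reports every
   block whose liveness regsets have diverged from the glat copies,
   whereas check_reg_live (true) aborts on the first mismatch.  */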
#endif /* ENABLE_CHECKING */

#endif /* INSN_SCHEDULING */