/* Instruction scheduling pass.  This file contains definitions used
   internally in the scheduler.
   Copyright (C) 1992-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#ifndef GCC_SCHED_INT_H
#define GCC_SCHED_INT_H

#ifdef INSN_SCHEDULING

/* Identifier of a scheduler pass.  */
enum sched_pass_id_t { SCHED_PASS_UNKNOWN, SCHED_RGN_PASS, SCHED_EBB_PASS,
		       SCHED_SMS_PASS, SCHED_SEL_PASS };

/* The algorithm used to implement -fsched-pressure.  */
enum sched_pressure_algorithm
{
  SCHED_PRESSURE_NONE,
  SCHED_PRESSURE_WEIGHTED,
  SCHED_PRESSURE_MODEL
};

typedef vec<basic_block> bb_vec_t;
typedef vec<rtx_insn *> insn_vec_t;
typedef vec<rtx_insn *> rtx_vec_t;

extern void sched_init_bbs (void);

extern void sched_extend_luids (void);
extern void sched_init_insn_luid (rtx_insn *);
extern void sched_init_luids (bb_vec_t);
extern void sched_finish_luids (void);

extern void sched_extend_target (void);

extern void haifa_init_h_i_d (bb_vec_t);
extern void haifa_finish_h_i_d (void);

/* Hooks that are common to all the schedulers.  */
struct common_sched_info_def
{
  /* Called after blocks were rearranged due to movement of a jump
     instruction.  The first parameter is the index of the basic block in
     which the jump currently is.  The second parameter is the index of the
     basic block in which the jump used to be.  The third parameter is the
     index of the basic block that follows the second parameter.  */
  void (*fix_recovery_cfg) (int, int, int);

  /* Called to notify the frontend that a new basic block is being added.
     The first parameter is the new basic block.
     The second parameter is the block after which the new basic block is
     being added, or the exit block if a recovery block is being added,
     or NULL if a standalone block is being added.  */
  void (*add_block) (basic_block, basic_block);

  /* Estimate number of insns in the basic block.  */
  int (*estimate_number_of_insns) (basic_block);

  /* Given a non-insn (!INSN_P (x)) return
     -1 - if this rtx doesn't need a luid.
     0 - if it should have the same luid as the previous insn.
     1 - if it needs a separate luid.  */
  int (*luid_for_non_insn) (rtx);

  /* Scheduler pass identifier.  It is preferably used in assertions.  */
  enum sched_pass_id_t sched_pass_id;
};

extern struct common_sched_info_def *common_sched_info;

extern const struct common_sched_info_def haifa_common_sched_info;

/* Return true if selective scheduling pass is working.  */
static inline bool
sel_sched_p (void)
{
  return common_sched_info->sched_pass_id == SCHED_SEL_PASS;
}

/* Returns maximum priority that an insn was assigned to.  */
extern int get_rgn_sched_max_insns_priority (void);

/* Increases effective priority for INSN by AMOUNT.  */
extern void sel_add_to_insn_priority (rtx, int);

/* True if during selective scheduling we need to emulate some of haifa
   scheduler behavior.  */
extern int sched_emulate_haifa_p;

/* Mapping from INSN_UID to INSN_LUID.  In the end all other per insn data
   structures should be indexed by luid.  */
extern vec<int> sched_luids;
#define INSN_LUID(INSN) (sched_luids[INSN_UID (INSN)])
#define LUID_BY_UID(UID) (sched_luids[UID])

#define SET_INSN_LUID(INSN, LUID) \
(sched_luids[INSN_UID (INSN)] = (LUID))
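
/* A minimal usage sketch (hypothetical caller, not part of this interface):
   luids give a dense index for per-insn arrays, reached either from an insn
   or directly from a uid, e.g.

     rtx_insn *insn;
     FOR_BB_INSNS (bb, insn)
       if (INSN_P (insn))
	 per_insn_data[INSN_LUID (insn)] = analyze (insn);

   where per_insn_data and analyze are illustrative names only.  */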

/* The highest INSN_LUID.  */
extern int sched_max_luid;

extern int insn_luid (rtx);

/* This list holds ripped off notes from the current block.  These notes will
   be attached to the beginning of the block when its scheduling is
   finished.  */
extern rtx_insn *note_list;

extern void remove_notes (rtx_insn *, rtx_insn *);
extern rtx_insn *restore_other_notes (rtx_insn *, basic_block);
extern void sched_insns_init (rtx);
extern void sched_insns_finish (void);

extern void *xrecalloc (void *, size_t, size_t, size_t);

extern void reemit_notes (rtx_insn *);

/* Functions in haifa-sched.c.  */
extern int haifa_classify_insn (const_rtx);

/* Functions in sel-sched-ir.c.  */
extern void sel_find_rgns (void);
extern void sel_mark_hard_insn (rtx);

extern size_t dfa_state_size;

extern void advance_state (state_t);

extern void setup_sched_dump (void);
extern void sched_init (void);
extern void sched_finish (void);

extern bool sel_insn_is_speculation_check (rtx);

/* Describe the ready list of the scheduler.
   VEC holds space enough for all insns in the current region.  VECLEN
   says how many exactly.
   FIRST is the index of the element with the highest priority; i.e. the
   last one in the ready list, since elements are ordered by ascending
   priority.
   N_READY determines how many insns are on the ready list.
   N_DEBUG determines how many debug insns are on the ready list.  */
struct ready_list
{
  rtx_insn **vec;
  int veclen;
  int first;
  int n_ready;
  int n_debug;
};
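
/* A minimal sketch (hypothetical debug helper, not part of this interface)
   of walking a ready_list from the highest-priority element downwards.
   Since FIRST indexes the best element and elements are ordered by ascending
   priority, element I of the list lives at VEC[FIRST - I]:

     static void
     debug_dump_ready (const struct ready_list *r)
     {
       for (int i = 0; i < r->n_ready; i++)
	 fprintf (stderr, " %d", INSN_UID (r->vec[r->first - i]));
       fprintf (stderr, "\n");
     }
 */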

extern signed char *ready_try;
extern struct ready_list ready;

extern int max_issue (struct ready_list *, int, state_t, bool, int *);

extern void ebb_compute_jump_reg_dependencies (rtx, regset);

extern edge find_fallthru_edge_from (basic_block);

extern void (* sched_init_only_bb) (basic_block, basic_block);
extern basic_block (* sched_split_block) (basic_block, rtx);
extern basic_block sched_split_block_1 (basic_block, rtx);
extern basic_block (* sched_create_empty_bb) (basic_block);
extern basic_block sched_create_empty_bb_1 (basic_block);

extern basic_block sched_create_recovery_block (basic_block *);
extern void sched_create_recovery_edges (basic_block, basic_block,
					 basic_block);

/* Pointer to data describing the current DFA state.  */
extern state_t curr_state;

/* Type to represent status of a dependence.  */
typedef unsigned int ds_t;
#define BITS_PER_DEP_STATUS HOST_BITS_PER_INT

/* Type to represent weakness of speculative dependence.  */
typedef unsigned int dw_t;

extern enum reg_note ds_to_dk (ds_t);
extern ds_t dk_to_ds (enum reg_note);

/* Describe a dependency that can be broken by making a replacement
   in one of the patterns.  LOC is the location, ORIG and NEWVAL the
   two alternative contents, and INSN the instruction that must be
   changed.  */
struct dep_replacement
{
  rtx *loc;
  rtx orig;
  rtx newval;
  rtx_insn *insn;
};

/* Information about the dependency.  */
struct _dep
{
  /* Producer.  */
  rtx_insn *pro;

  /* Consumer.  */
  rtx_insn *con;

  /* If nonnull, holds a pointer to information about how to break the
     dependency by making a replacement in one of the insns.  There is
     only one such dependency for each insn that must be modified in
     order to break such a dependency.  */
  struct dep_replacement *replace;

  /* Dependency status.  This field holds all dependency types and additional
     information for speculative dependencies.  */
  ds_t status;

  /* Dependency major type.  This field is superseded by STATUS above.
     However, it is still in place because some targets use it.  */
  ENUM_BITFIELD(reg_note) type:6;

  unsigned nonreg:1;
  unsigned multiple:1;

  /* Cached cost of the dependency.  Make sure to update UNKNOWN_DEP_COST
     when changing the size of this field.  */
  int cost:20;
};

#define UNKNOWN_DEP_COST ((int) ((unsigned int) -1 << 19))

typedef struct _dep dep_def;
typedef dep_def *dep_t;

#define DEP_PRO(D) ((D)->pro)
#define DEP_CON(D) ((D)->con)
#define DEP_TYPE(D) ((D)->type)
#define DEP_STATUS(D) ((D)->status)
#define DEP_COST(D) ((D)->cost)
#define DEP_NONREG(D) ((D)->nonreg)
#define DEP_MULTIPLE(D) ((D)->multiple)
#define DEP_REPLACE(D) ((D)->replace)

/* Functions to work with dep.  */

extern void init_dep_1 (dep_t, rtx_insn *, rtx_insn *, enum reg_note, ds_t);
extern void init_dep (dep_t, rtx_insn *, rtx_insn *, enum reg_note);

extern void sd_debug_dep (dep_t);
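
/* A minimal sketch (hypothetical snippet) of building and inspecting a
   dependence with the helpers above; producer and consumer stand for
   arbitrary rtx_insn pointers:

     dep_def d;
     init_dep (&d, producer, consumer, REG_DEP_TRUE);
     gcc_assert (DEP_PRO (&d) == producer && DEP_TYPE (&d) == REG_DEP_TRUE);
     sd_debug_dep (&d);
 */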

/* Definition of this struct resides below.  */
struct _dep_node;
typedef struct _dep_node *dep_node_t;

/* A link in the dependency list.  This is essentially an equivalent of a
   single {INSN, DEPS}_LIST rtx.  */
struct _dep_link
{
  /* Dep node with all the data.  */
  dep_node_t node;

  /* Next link in the list. For the last one it is NULL.  */
  struct _dep_link *next;

  /* Pointer to the next field of the previous link in the list.
     For the first link this points to the deps_list->first.

     With help of this field it is easy to remove and insert links to the
     list.  */
  struct _dep_link **prev_nextp;
};
typedef struct _dep_link *dep_link_t;

#define DEP_LINK_NODE(N) ((N)->node)
#define DEP_LINK_NEXT(N) ((N)->next)
#define DEP_LINK_PREV_NEXTP(N) ((N)->prev_nextp)

/* Macros to work with dep_link.  For most use cases only part of the
   dependency information is needed.  These macros conveniently provide that
   piece of information.  */

#define DEP_LINK_DEP(N) (DEP_NODE_DEP (DEP_LINK_NODE (N)))
#define DEP_LINK_PRO(N) (DEP_PRO (DEP_LINK_DEP (N)))
#define DEP_LINK_CON(N) (DEP_CON (DEP_LINK_DEP (N)))
#define DEP_LINK_TYPE(N) (DEP_TYPE (DEP_LINK_DEP (N)))
#define DEP_LINK_STATUS(N) (DEP_STATUS (DEP_LINK_DEP (N)))

/* A list of dep_links.  */
struct _deps_list
{
  /* First element.  */
  dep_link_t first;

  /* Total number of elements in the list.  */
  int n_links;
};
typedef struct _deps_list *deps_list_t;

#define DEPS_LIST_FIRST(L) ((L)->first)
#define DEPS_LIST_N_LINKS(L) ((L)->n_links)
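
/* A minimal traversal sketch (hypothetical helper): a deps_list is a singly
   linked list of dep_links, so all of its deps can be visited with

     static void
     debug_dump_deps_list (deps_list_t list)
     {
       for (dep_link_t link = DEPS_LIST_FIRST (list);
	    link != NULL; link = DEP_LINK_NEXT (link))
	 sd_debug_dep (DEP_LINK_DEP (link));
     }
 */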

/* Suppose we have a dependence Y between insn pro1 and con1, where pro1 has
   additional dependents con0 and con2, and con1 is dependent on additional
   insns pro0 and pro2:

   .con0      pro0
   . ^         |
   . |         |
   . |         |
   . X         A
   . |         |
   . |         |
   . |         V
   .pro1--Y-->con1
   . |         ^
   . |         |
   . |         |
   . Z         B
   . |         |
   . |         |
   . V         |
   .con2      pro2

   This is represented using a "dep_node" for each dependence arc, which are
   connected as follows (diagram is centered around Y which is fully shown;
   other dep_nodes shown partially):

   .          +------------+    +--------------+    +------------+
   .          : dep_node X :    |  dep_node Y  |    : dep_node Z :
   .          :            :    |              |    :            :
   .          :            :    |              |    :            :
   .          : forw       :    |  forw        |    : forw       :
   .          : +--------+ :    |  +--------+  |    : +--------+ :
   forw_deps  : |dep_link| :    |  |dep_link|  |    : |dep_link| :
   +-----+    : | +----+ | :    |  | +----+ |  |    : | +----+ | :
   |first|----->| |next|-+------+->| |next|-+--+----->| |next|-+--->NULL
   +-----+    : | +----+ | :    |  | +----+ |  |    : | +----+ | :
   . ^  ^     : |     ^  | :    |  |     ^  |  |    : |        | :
   . |  |     : |     |  | :    |  |     |  |  |    : |        | :
   . |  +--<----+--+  +--+---<--+--+--+  +--+--+--<---+--+     | :
   . |        : |  |     | :    |  |  |     |  |    : |  |     | :
   . |        : | +----+ | :    |  | +----+ |  |    : | +----+ | :
   . |        : | |prev| | :    |  | |prev| |  |    : | |prev| | :
   . |        : | |next| | :    |  | |next| |  |    : | |next| | :
   . |        : | +----+ | :    |  | +----+ |  |    : | +----+ | :
   . |        : |        | :<-+ |  |        |  |<-+ : |        | :<-+
   . |        : | +----+ | :  | |  | +----+ |  |  | : | +----+ | :  |
   . |        : | |node|-+----+ |  | |node|-+--+--+ : | |node|-+----+
   . |        : | +----+ | :    |  | +----+ |  |    : | +----+ | :
   . |        : |        | :    |  |        |  |    : |        | :
   . |        : +--------+ :    |  +--------+  |    : +--------+ :
   . |        :            :    |              |    :            :
   . |        :  SAME pro1 :    |  +--------+  |    :  SAME pro1 :
   . |        :  DIFF con0 :    |  |dep     |  |    :  DIFF con2 :
   . |        :            :    |  |        |  |    :            :
   . |                          |  | +----+ |  |
   .RTX<------------------------+--+-|pro1| |  |
   .pro1                        |  | +----+ |  |
   .                            |  |        |  |
   .                            |  | +----+ |  |
   .RTX<------------------------+--+-|con1| |  |
   .con1                        |  | +----+ |  |
   . |                          |  |        |  |
   . |                          |  | +----+ |  |
   . |                          |  | |kind| |  |
   . |                          |  | +----+ |  |
   . |        :            :    |  | |stat| |  |    :            :
   . |        :  DIFF pro0 :    |  | +----+ |  |    :  DIFF pro2 :
   . |        :  SAME con1 :    |  |        |  |    :  SAME con1 :
   . |        :            :    |  +--------+  |    :            :
   . |        :            :    |              |    :            :
   . |        : back       :    |  back        |    : back       :
   . v        : +--------+ :    |  +--------+  |    : +--------+ :
   back_deps  : |dep_link| :    |  |dep_link|  |    : |dep_link| :
   +-----+    : | +----+ | :    |  | +----+ |  |    : | +----+ | :
   |first|----->| |next|-+------+->| |next|-+--+----->| |next|-+--->NULL
   +-----+    : | +----+ | :    |  | +----+ |  |    : | +----+ | :
   .    ^     : |     ^  | :    |  |     ^  |  |    : |        | :
   .    |     : |     |  | :    |  |     |  |  |    : |        | :
   .    +--<----+--+  +--+---<--+--+--+  +--+--+--<---+--+     | :
   .          : |  |     | :    |  |  |     |  |    : |  |     | :
   .          : | +----+ | :    |  | +----+ |  |    : | +----+ | :
   .          : | |prev| | :    |  | |prev| |  |    : | |prev| | :
   .          : | |next| | :    |  | |next| |  |    : | |next| | :
   .          : | +----+ | :    |  | +----+ |  |    : | +----+ | :
   .          : |        | :<-+ |  |        |  |<-+ : |        | :<-+
   .          : | +----+ | :  | |  | +----+ |  |  | : | +----+ | :  |
   .          : | |node|-+----+ |  | |node|-+--+--+ : | |node|-+----+
   .          : | +----+ | :    |  | +----+ |  |    : | +----+ | :
   .          : |        | :    |  |        |  |    : |        | :
   .          : +--------+ :    |  +--------+  |    : +--------+ :
   .          :            :    |              |    :            :
   .          : dep_node A :    |  dep_node Y  |    : dep_node B :
   .          +------------+    +--------------+    +------------+
*/

struct _dep_node
{
  /* Backward link.  */
  struct _dep_link back;

  /* The dep.  */
  struct _dep dep;

  /* Forward link.  */
  struct _dep_link forw;
};

#define DEP_NODE_BACK(N) (&(N)->back)
#define DEP_NODE_DEP(N) (&(N)->dep)
#define DEP_NODE_FORW(N) (&(N)->forw)
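
/* A minimal sketch (hypothetical snippet): each dep_node carries its dep plus
   one link for the producer's forward list and one for the consumer's
   backward list, so the full dep is reachable from either link:

     dep_node_t n = DEP_LINK_NODE (link);
     gcc_assert (DEP_NODE_DEP (n) == DEP_LINK_DEP (link));
 */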

/* The following enumeration values tell us what dependencies we
   should use to implement the barrier.  We use true-dependencies for
   TRUE_BARRIER and anti-dependencies for MOVE_BARRIER.  */
enum reg_pending_barrier_mode
{
  NOT_A_BARRIER = 0,
  MOVE_BARRIER,
  TRUE_BARRIER
};

/* Whether a register movement is associated with a call.  */
enum post_call_group
{
  not_post_call,
  post_call,
  post_call_initial
};

/* Insns which affect pseudo-registers.  */
struct deps_reg
{
  rtx_insn_list *uses;
  rtx_insn_list *sets;
  rtx_insn_list *implicit_sets;
  rtx_insn_list *control_uses;
  rtx_insn_list *clobbers;
  int uses_length;
  int clobbers_length;
};

/* Describe state of dependencies used during sched_analyze phase.  */
struct deps_desc
{
  /* The *_insns and *_mems are paired lists.  Each pending memory operation
     will have a pointer to the MEM rtx on one list and a pointer to the
     containing insn on the other list in the same place in the list.  */

  /* We can't use add_dependence like the old code did, because a single insn
     may have multiple memory accesses, and hence needs to be on the list
     once for each memory access.  Add_dependence won't let you add an insn
     to a list more than once.  */

  /* An INSN_LIST containing all insns with pending read operations.  */
  rtx_insn_list *pending_read_insns;

  /* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
  rtx_expr_list *pending_read_mems;

  /* An INSN_LIST containing all insns with pending write operations.  */
  rtx_insn_list *pending_write_insns;

  /* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
  rtx_expr_list *pending_write_mems;

  /* An INSN_LIST containing all jump insns.  */
  rtx_insn_list *pending_jump_insns;

  /* We must prevent the above lists from ever growing too large since
     the number of dependencies produced is at least O(N*N),
     and execution time is at least O(4*N*N), as a function of the
     length of these pending lists.  */

  /* Indicates the length of the pending_read list.  */
  int pending_read_list_length;

  /* Indicates the length of the pending_write list.  */
  int pending_write_list_length;

  /* Length of the pending memory flush list plus the length of the pending
     jump insn list.  Large functions with no calls may build up extremely
     large lists.  */
  int pending_flush_length;

  /* The last insn upon which all memory references must depend.
     This is an insn which flushed the pending lists, creating a dependency
     between it and all previously pending memory references.  This creates
     a barrier (or a checkpoint) which no memory reference is allowed to cross.

     This includes all non constant CALL_INSNs.  When we do interprocedural
     alias analysis, this restriction can be relaxed.
     This may also be an INSN that writes memory if the pending lists grow
     too large.  */
  rtx_insn_list *last_pending_memory_flush;

  /* A list of the last function calls we have seen.  We use a list to
     represent last function calls from multiple predecessor blocks.
     Used to prevent register lifetimes from expanding unnecessarily.  */
  rtx_insn_list *last_function_call;

  /* A list of the last function calls that may not return normally
     we have seen.  We use a list to represent last function calls from
     multiple predecessor blocks.  Used to prevent moving trapping insns
     across such calls.  */
  rtx_insn_list *last_function_call_may_noreturn;

  /* A list of insns which use a pseudo register that does not already
     cross a call.  We create dependencies between each of those insn
     and the next call insn, to ensure that they won't cross a call after
     scheduling is done.  */
  rtx_insn_list *sched_before_next_call;

  /* Similarly, a list of insns which should not cross a branch.  */
  rtx_insn_list *sched_before_next_jump;

  /* Used to keep post-call pseudo/hard reg movements together with
     the call.  */
  enum post_call_group in_post_call_group_p;

  /* The last debug insn we've seen.  */
  rtx_insn *last_debug_insn;

  /* The last insn bearing REG_ARGS_SIZE that we've seen.  */
  rtx_insn *last_args_size;

  /* A list of all prologue insns we have seen without intervening epilogue
     insns, and one of all epilogue insns we have seen without intervening
     prologue insns.  This is used to prevent mixing prologue and epilogue
     insns.  See PR78029.  */
  rtx_insn_list *last_prologue;
  rtx_insn_list *last_epilogue;

  /* Whether the last *logue insn was an epilogue insn or a prologue insn
     instead.  */
  bool last_logue_was_epilogue;

  /* The maximum register number for the following arrays.  Before reload
     this is max_reg_num; after reload it is FIRST_PSEUDO_REGISTER.  */
  int max_reg;

  /* Element N is the next insn that sets (hard or pseudo) register
     N within the current basic block; or zero, if there is no
     such insn.  Needed for new registers which may be introduced
     by splitting insns.  */
  struct deps_reg *reg_last;

  /* Element N is set for each register that has any nonzero element
     in reg_last[N].{uses,sets,clobbers}.  */
  regset_head reg_last_in_use;

  /* Shows the last value of reg_pending_barrier associated with the insn.  */
  enum reg_pending_barrier_mode last_reg_pending_barrier;

  /* True when this context should be treated as readonly by
     the analysis.  */
  BOOL_BITFIELD readonly : 1;
};

typedef struct deps_desc *deps_t;

/* This structure holds some state of the current scheduling pass, and
   contains some function pointers that abstract out some of the non-generic
   functionality from functions such as schedule_block or schedule_insn.
   There is one global variable, current_sched_info, which points to the
   sched_info structure currently in use.  */
struct haifa_sched_info
{
  /* Add all insns that are initially ready to the ready list.  Called once
     before scheduling a set of insns.  */
  void (*init_ready_list) (void);
  /* Called after taking an insn from the ready list.  Returns nonzero if
     this insn can be scheduled, zero if we should silently discard it.  */
  int (*can_schedule_ready_p) (rtx_insn *);
  /* Return nonzero if there are more insns that should be scheduled.  */
  int (*schedule_more_p) (void);
  /* Called after an insn has all its hard dependencies resolved.
     Adjusts the status of the instruction (which is passed through the
     second parameter) to indicate whether the instruction should be moved to
     the ready list or the queue, or whether it should be silently discarded
     (until the next resolved dependence).  */
  ds_t (*new_ready) (rtx_insn *, ds_t);
  /* Compare priority of two insns.  Return a positive number if the second
     insn is to be preferred for scheduling, and a negative one if the first
     is to be preferred.  Zero if they are equally good.  */
  int (*rank) (rtx_insn *, rtx_insn *);
  /* Return a string that contains the insn uid and optionally anything else
     necessary to identify this insn in an output.  It's valid to use a
     static buffer for this.  The ALIGNED parameter should cause the string
     to be formatted so that multiple output lines will line up nicely.  */
  const char *(*print_insn) (const rtx_insn *, int);
  /* Return nonzero if an insn should be included in priority
     calculations.  */
  int (*contributes_to_priority) (rtx_insn *, rtx_insn *);

  /* Return true if scheduling insn (passed as the parameter) will trigger
     finish of scheduling current block.  */
  bool (*insn_finishes_block_p) (rtx_insn *);

  /* The boundaries of the set of insns to be scheduled.  */
  rtx_insn *prev_head, *next_tail;

  /* Filled in after the schedule is finished; the first and last scheduled
     insns.  */
  rtx_insn *head, *tail;

  /* If nonzero, enables an additional sanity check in schedule_block.  */
  unsigned int queue_must_finish_empty:1;

  /* Maximum priority that has been assigned to an insn.  */
  int sched_max_insns_priority;

  /* Hooks to support speculative scheduling.  */

  /* Called to notify frontend that instruction is being added (second
     parameter == 0) or removed (second parameter == 1).  */
  void (*add_remove_insn) (rtx_insn *, int);

  /* Called to notify the frontend that instruction INSN is being
     scheduled.  */
  void (*begin_schedule_ready) (rtx_insn *insn);

  /* Called to notify the frontend that an instruction INSN is about to be
     moved to its correct place in the final schedule.  This is done for all
     insns in order of the schedule.  LAST indicates the last scheduled
     instruction.  */
  void (*begin_move_insn) (rtx_insn *insn, rtx_insn *last);

  /* If the second parameter is not NULL, return a nonnull value if the
     basic block should be advanced.
     If the second parameter is NULL, return the next basic block in EBB.
     The first parameter is the current basic block in EBB.  */
  basic_block (*advance_target_bb) (basic_block, rtx_insn *);

  /* Allocate memory, store the frontend scheduler state in it, and
     return it.  */
  void *(*save_state) (void);
  /* Restore frontend scheduler state from the argument, and free the
     memory.  */
  void (*restore_state) (void *);

  /* ??? FIXME: should use straight bitfields inside sched_info instead of
     this flag field.  */
  unsigned int flags;
};

/* This structure holds description of the properties for speculative
   scheduling.  */
struct spec_info_def
{
  /* Holds types of allowed speculations: BEGIN_{DATA|CONTROL},
     BE_IN_{DATA|CONTROL}.  */
  int mask;

  /* A dump file for additional information on speculative scheduling.  */
  FILE *dump;

  /* Minimal cumulative weakness of a speculative instruction's
     dependencies required for the insn to be scheduled.  */
  dw_t data_weakness_cutoff;

  /* Minimal usefulness of speculative instruction to be considered for
     scheduling.  */
  int control_weakness_cutoff;

  /* Flags from the enum SPEC_SCHED_FLAGS.  */
  int flags;
};
typedef struct spec_info_def *spec_info_t;

extern spec_info_t spec_info;

extern struct haifa_sched_info *current_sched_info;

/* Do register pressure sensitive insn scheduling if the flag is set
   up.  */
extern enum sched_pressure_algorithm sched_pressure;

/* Map regno -> its pressure class.  The map defined only when
   SCHED_PRESSURE_P is true.  */
extern enum reg_class *sched_regno_pressure_class;

/* Indexed by INSN_UID, the collection of all data associated with
   a single instruction.  */

struct _haifa_deps_insn_data
{
  /* The number of incoming edges in the forward dependency graph.
     As scheduling proceeds, counts are decreased.  An insn moves to
     the ready queue when its counter reaches zero.  */
  int dep_count;

  /* Nonzero if instruction has internal dependence
     (e.g. add_dependence was invoked with (insn == elem)).  */
  unsigned int has_internal_dep;

  /* NB: We can't place 'struct _deps_list' here instead of deps_list_t into
     h_i_d because when h_i_d extends, addresses of the deps_list->first
     change without updating deps_list->first->next->prev_nextp.  Thus
     BACK_DEPS and RESOLVED_BACK_DEPS are allocated on the heap and FORW_DEPS
     list is allocated on the obstack.  */

  /* A list of hard backward dependencies.  The insn is a consumer of all the
     deps mentioned here.  */
  deps_list_t hard_back_deps;

  /* A list of speculative (weak) dependencies.  The insn is a consumer of all
     the deps mentioned here.  */
  deps_list_t spec_back_deps;

  /* A list of insns which depend on the instruction.  Unlike 'back_deps',
     it represents forward dependencies.  */
  deps_list_t forw_deps;

  /* A list of scheduled producers of the instruction.  Links are being moved
     from 'back_deps' to 'resolved_back_deps' while scheduling.  */
  deps_list_t resolved_back_deps;

  /* A list of scheduled consumers of the instruction.  Links are being moved
     from 'forw_deps' to 'resolved_forw_deps' while scheduling to speed up
     the search in 'forw_deps'.  */
  deps_list_t resolved_forw_deps;

  /* If the insn is conditional (either through COND_EXEC, or because
     it is a conditional branch), this records the condition.  NULL
     for insns that haven't been seen yet or don't have a condition;
     const_true_rtx to mark an insn without a condition, or with a
     condition that has been clobbered by a subsequent insn.  */
  rtx cond;

  /* For a conditional insn, a list of insns that could set the condition
     register.  Used when generating control dependencies.  */
  rtx_insn_list *cond_deps;

  /* True if the condition in 'cond' should be reversed to get the actual
     condition.  */
  unsigned int reverse_cond : 1;

  /* Some insns (e.g. call) are not allowed to move across blocks.  */
  unsigned int cant_move : 1;
};


/* Bits used for storing values of the fields in the following
   structure.  */
#define INCREASE_BITS 8

/* The structure describes how the corresponding insn increases the
   register pressure for each pressure class.  */
struct reg_pressure_data
{
  /* Pressure increase for given class because of clobber.  */
  unsigned int clobber_increase : INCREASE_BITS;
  /* Increase in register pressure for given class because of register
     sets. */
  unsigned int set_increase : INCREASE_BITS;
  /* Pressure increase for given class because of unused register
     set.  */
  unsigned int unused_set_increase : INCREASE_BITS;
  /* Pressure change: #sets - #deaths.  */
  int change : INCREASE_BITS;
};

/* The following structure describes usage of registers by insns.  */
struct reg_use_data
{
  /* Regno used in the insn.  */
  int regno;
  /* Insn using the regno.  */
  rtx_insn *insn;
  /* Cyclic list of elements with the same regno.  */
  struct reg_use_data *next_regno_use;
  /* List of elements with the same insn.  */
  struct reg_use_data *next_insn_use;
};
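
/* A minimal traversal sketch (hypothetical snippet): next_regno_use links
   form a cycle, so all uses of the same regno can be visited starting from
   any element and stopping once it comes back around:

     struct reg_use_data *u = start;
     do
       {
	 gcc_assert (u->regno == start->regno);
	 u = u->next_regno_use;
       }
     while (u != start);

   where start is any existing reg_use_data element.  */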

/* The following structure describes used sets of registers by insns.
   Registers are pseudos whose pressure class is not NO_REGS or hard
   registers available for allocations.  */
struct reg_set_data
{
  /* Regno used in the insn.  */
  int regno;
  /* Insn setting the regno.  */
  rtx insn;
  /* List of elements with the same insn.  */
  struct reg_set_data *next_insn_set;
};

enum autopref_multipass_data_status {
  /* Entry is irrelevant for auto-prefetcher.  */
  AUTOPREF_MULTIPASS_DATA_IRRELEVANT = -2,
  /* Entry is uninitialized.  */
  AUTOPREF_MULTIPASS_DATA_UNINITIALIZED = -1,
  /* Entry is relevant for auto-prefetcher and insn can be delayed
     to allow another insn through.  */
  AUTOPREF_MULTIPASS_DATA_NORMAL = 0,
  /* Entry is relevant for auto-prefetcher, but insn should not be
     delayed as that will break scheduling.  */
  AUTOPREF_MULTIPASS_DATA_DONT_DELAY = 1
};

/* Data for modeling cache auto-prefetcher.  */
struct autopref_multipass_data_
{
  /* Base part of memory address.  */
  rtx base;

  /* Memory offsets from the base.  */
  int offset;

  /* Entry status.  */
  enum autopref_multipass_data_status status;
};
typedef struct autopref_multipass_data_ autopref_multipass_data_def;
typedef autopref_multipass_data_def *autopref_multipass_data_t;

struct _haifa_insn_data
{
  /* We can't place 'struct _deps_list' into h_i_d instead of deps_list_t
     because when h_i_d extends, addresses of the deps_list->first
     change without updating deps_list->first->next->prev_nextp.  */

  /* Logical uid gives the original ordering of the insns.  */
  int luid;

  /* A priority for each insn.  */
  int priority;

  /* The fusion priority for each insn.  */
  int fusion_priority;

  /* The minimum clock tick at which the insn becomes ready.  This is
     used to note timing constraints for the insns in the pending list.  */
  int tick;

  /* For insns that are scheduled at a fixed difference from another,
     this records the tick in which they must be ready.  */
  int exact_tick;

  /* INTER_TICK is used to adjust INSN_TICKs of instructions from the
     subsequent blocks in a region.  */
  int inter_tick;

  /* Used temporarily to estimate an INSN_TICK value for an insn given
     current knowledge.  */
  int tick_estimate;

  /* See comment on QUEUE_INDEX macro in haifa-sched.c.  */
  int queue_index;

  short cost;

  /* '> 0' if priority is valid,
     '== 0' if priority was not yet computed,
     '< 0' if priority is invalid and should be recomputed.  */
  signed char priority_status;

  /* Set if there's DEF-USE dependence between some speculatively
     moved load insn and this one.  */
  unsigned int fed_by_spec_load : 1;
  unsigned int is_load_insn : 1;
  /* Nonzero if this insn has negative-cost forward dependencies against
     an already scheduled insn.  */
  unsigned int feeds_backtrack_insn : 1;

  /* Nonzero if this insn is a shadow of another, scheduled after a fixed
     delay.  We only emit shadows at the end of a cycle, with no other
     real insns following them.  */
  unsigned int shadow_p : 1;

  /* Used internally in unschedule_insns_until to mark insns that must have
     their TODO_SPEC recomputed.  */
  unsigned int must_recompute_spec : 1;

  /* What speculations are necessary to apply to schedule the instruction.  */
  ds_t todo_spec;

  /* What speculations were already applied.  */
  ds_t done_spec;

  /* What speculations are checked by this instruction.  */
  ds_t check_spec;

  /* Recovery block for speculation checks.  */
  basic_block recovery_block;

  /* Original pattern of the instruction.  */
  rtx orig_pat;

  /* For insns with DEP_CONTROL dependencies, the predicated pattern if it
     was ever successfully constructed.  */
  rtx predicated_pat;

  /* The following array contains info how the insn increases register
     pressure.  There is an element for each cover class of pseudos
     referenced in insns.  */
  struct reg_pressure_data *reg_pressure;
  /* The following array contains the maximal reg pressure between the last
     scheduled insn and the given insn.  There is an element for each
     pressure class of pseudos referenced in insns.  This info is updated
     after scheduling each insn for each insn between the two
     mentioned insns.  */
  int *max_reg_pressure;
  /* The following list contains info about used pseudos and hard
     registers available for allocation.  */
  struct reg_use_data *reg_use_list;
  /* The following list contains info about set pseudos and hard
     registers available for allocation.  */
  struct reg_set_data *reg_set_list;
  /* Info about how scheduling the insn changes cost of register
     pressure excess (between source and target).  */
  int reg_pressure_excess_cost_change;
  int model_index;

  /* Original order of insns in the ready list.  */
  int rfs_debug_orig_order;

  /* The deciding reason for INSN's place in the ready list.  */
  int last_rfs_win;

  /* Two entries for cache auto-prefetcher model: one for mem reads,
     and one for mem writes.  */
  autopref_multipass_data_def autopref_multipass_data[2];
};

typedef struct _haifa_insn_data haifa_insn_data_def;
typedef haifa_insn_data_def *haifa_insn_data_t;


extern vec<haifa_insn_data_def> h_i_d;

#define HID(INSN) (&h_i_d[INSN_UID (INSN)])

/* Accessor macros for h_i_d.  There are more in haifa-sched.c and
   sched-rgn.c.  */
#define INSN_PRIORITY(INSN) (HID (INSN)->priority)
#define INSN_FUSION_PRIORITY(INSN) (HID (INSN)->fusion_priority)
#define INSN_REG_PRESSURE(INSN) (HID (INSN)->reg_pressure)
#define INSN_MAX_REG_PRESSURE(INSN) (HID (INSN)->max_reg_pressure)
#define INSN_REG_USE_LIST(INSN) (HID (INSN)->reg_use_list)
#define INSN_REG_SET_LIST(INSN) (HID (INSN)->reg_set_list)
#define INSN_REG_PRESSURE_EXCESS_COST_CHANGE(INSN) \
  (HID (INSN)->reg_pressure_excess_cost_change)
#define INSN_PRIORITY_STATUS(INSN) (HID (INSN)->priority_status)
#define INSN_MODEL_INDEX(INSN) (HID (INSN)->model_index)
#define INSN_AUTOPREF_MULTIPASS_DATA(INSN) \
  (HID (INSN)->autopref_multipass_data)
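
/* A minimal sketch (hypothetical snippet): h_i_d is indexed by INSN_UID via
   HID, so per-insn fields are reached directly from the insn, e.g.

     if (INSN_PRIORITY_STATUS (insn) > 0)
       total_priority += INSN_PRIORITY (insn);

   where total_priority is an illustrative accumulator.  */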

typedef struct _haifa_deps_insn_data haifa_deps_insn_data_def;
typedef haifa_deps_insn_data_def *haifa_deps_insn_data_t;


extern vec<haifa_deps_insn_data_def> h_d_i_d;

#define HDID(INSN) (&h_d_i_d[INSN_LUID (INSN)])
#define INSN_DEP_COUNT(INSN)	(HDID (INSN)->dep_count)
#define HAS_INTERNAL_DEP(INSN)  (HDID (INSN)->has_internal_dep)
#define INSN_FORW_DEPS(INSN) (HDID (INSN)->forw_deps)
#define INSN_RESOLVED_BACK_DEPS(INSN) (HDID (INSN)->resolved_back_deps)
#define INSN_RESOLVED_FORW_DEPS(INSN) (HDID (INSN)->resolved_forw_deps)
#define INSN_HARD_BACK_DEPS(INSN) (HDID (INSN)->hard_back_deps)
#define INSN_SPEC_BACK_DEPS(INSN) (HDID (INSN)->spec_back_deps)
#define INSN_CACHED_COND(INSN)	(HDID (INSN)->cond)
#define INSN_REVERSE_COND(INSN) (HDID (INSN)->reverse_cond)
#define INSN_COND_DEPS(INSN)	(HDID (INSN)->cond_deps)
#define CANT_MOVE(INSN)	(HDID (INSN)->cant_move)
#define CANT_MOVE_BY_LUID(LUID)	(h_d_i_d[LUID].cant_move)
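
/* Note the indexing difference from h_i_d above: these accessors go through
   INSN_LUID rather than INSN_UID.  A minimal sketch (hypothetical snippet)
   that only restates that the two spellings name the same field:

     gcc_assert (CANT_MOVE (insn) == CANT_MOVE_BY_LUID (INSN_LUID (insn)));
 */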


#define INSN_PRIORITY(INSN)	(HID (INSN)->priority)
#define INSN_PRIORITY_STATUS(INSN) (HID (INSN)->priority_status)
#define INSN_PRIORITY_KNOWN(INSN) (INSN_PRIORITY_STATUS (INSN) > 0)
#define TODO_SPEC(INSN) (HID (INSN)->todo_spec)
#define DONE_SPEC(INSN) (HID (INSN)->done_spec)
#define CHECK_SPEC(INSN) (HID (INSN)->check_spec)
#define RECOVERY_BLOCK(INSN) (HID (INSN)->recovery_block)
#define ORIG_PAT(INSN) (HID (INSN)->orig_pat)
#define PREDICATED_PAT(INSN) (HID (INSN)->predicated_pat)

/* INSN is either a simple or a branchy speculation check.  */
#define IS_SPECULATION_CHECK_P(INSN) \
  (sel_sched_p () ? sel_insn_is_speculation_check (INSN) : RECOVERY_BLOCK (INSN) != NULL)

/* INSN is a speculation check that will simply reexecute the speculatively
   scheduled instruction if the speculation fails.  */
#define IS_SPECULATION_SIMPLE_CHECK_P(INSN) \
  (RECOVERY_BLOCK (INSN) == EXIT_BLOCK_PTR_FOR_FN (cfun))

/* INSN is a speculation check that will branch to RECOVERY_BLOCK if the
   speculation fails.  Insns in that block will reexecute the speculatively
   scheduled code and then will return immediately after INSN thus preserving
   semantics of the program.  */
#define IS_SPECULATION_BRANCHY_CHECK_P(INSN) \
  (RECOVERY_BLOCK (INSN) != NULL             \
   && RECOVERY_BLOCK (INSN) != EXIT_BLOCK_PTR_FOR_FN (cfun))


/* Dep status (aka ds_t) of the link encapsulates all information for a given
   dependency, including everything that is needed for speculative scheduling.

   The lay-out of a ds_t is as follows:

   1. Integers corresponding to the probability of the dependence to *not*
      exist.  This is the probability that overcoming this dependence will
      not be followed by execution of the recovery code.  Note that however
      high this probability is, the recovery code should still always be
      generated to preserve semantics of the program.

      The probability values can be set or retrieved using the functions
      set_dep_weak () and get_dep_weak () in sched-deps.c.  The values
      are always in the range [0, MAX_DEP_WEAK].

	BEGIN_DATA	: BITS_PER_DEP_WEAK
	BE_IN_DATA	: BITS_PER_DEP_WEAK
	BEGIN_CONTROL	: BITS_PER_DEP_WEAK
	BE_IN_CONTROL	: BITS_PER_DEP_WEAK

      The basic type of DS_T is a host int.  For a 32-bit int, the values
      will each take 6 bits.

   2. The type of dependence.  This supersedes the old-style REG_NOTE_KIND
1036*38fd1498Szrj       values.  TODO: Use this field instead of DEP_TYPE, or make DEP_TYPE
1037*38fd1498Szrj       extract the dependence type from here.
1038*38fd1498Szrj 
1039*38fd1498Szrj 	dep_type	:  4 => DEP_{TRUE|OUTPUT|ANTI|CONTROL}
1040*38fd1498Szrj 
1041*38fd1498Szrj    3. Various flags:
1042*38fd1498Szrj 
1043*38fd1498Szrj 	HARD_DEP	:  1 =>	Set if an instruction has a non-speculative
1044*38fd1498Szrj 				dependence.  This is an instruction property
1045*38fd1498Szrj 				so this bit can only appear in the TODO_SPEC
1046*38fd1498Szrj 				field of an instruction.
1047*38fd1498Szrj 	DEP_POSTPONED	:  1 =>	Like HARD_DEP, but the hard dependence may
1048*38fd1498Szrj 				still be broken by adjusting the instruction.
1049*38fd1498Szrj 	DEP_CANCELLED	:  1 =>	Set if a dependency has been broken using
1050*38fd1498Szrj 				some form of speculation.
1051*38fd1498Szrj 	RESERVED	:  1 => Reserved for use in the delay slot scheduler.
1052*38fd1498Szrj 
1053*38fd1498Szrj    See also: check_dep_status () in sched-deps.c .  */
1054*38fd1498Szrj 
1055*38fd1498Szrj /* The number of bits per weakness probability.  There are 4 weakness types
1056*38fd1498Szrj    and we need 8 bits for other data in a DS_T.  */
1057*38fd1498Szrj #define BITS_PER_DEP_WEAK ((BITS_PER_DEP_STATUS - 8) / 4)
1058*38fd1498Szrj 
1059*38fd1498Szrj /* Mask of speculative weakness in dep_status.  */
1060*38fd1498Szrj #define DEP_WEAK_MASK ((1 << BITS_PER_DEP_WEAK) - 1)
1061*38fd1498Szrj 
1062*38fd1498Szrj /* This constant means that the dependence is fake with 99.999...% probability.
1063*38fd1498Szrj    It is the maximum value that can appear in dep_status.
1064*38fd1498Szrj    Note that, for debugging reasons, we don't want MAX_DEP_WEAK to be the same
1065*38fd1498Szrj    as DEP_WEAK_MASK.  Though it can be set to DEP_WEAK_MASK, and when that is
1066*38fd1498Szrj    done we get fast multiplication by and division by NO_DEP_WEAK.  */
1067*38fd1498Szrj #define MAX_DEP_WEAK (DEP_WEAK_MASK - 1)
1068*38fd1498Szrj 
1069*38fd1498Szrj /* This constant means that dependence is 99.999...% real and it is a really
1070*38fd1498Szrj    bad idea to overcome it (though this can be done, preserving program
1071*38fd1498Szrj    semantics).  */
1072*38fd1498Szrj #define MIN_DEP_WEAK 1
1073*38fd1498Szrj 
1074*38fd1498Szrj /* This constant represents 100% probability.
1075*38fd1498Szrj    E.g. it is used to represent the weakness of a dependence that doesn't exist.
1076*38fd1498Szrj    This value never appears in a ds_t; it is only used for computing the
1077*38fd1498Szrj    weakness of a dependence.  */
1078*38fd1498Szrj #define NO_DEP_WEAK (MAX_DEP_WEAK + MIN_DEP_WEAK)
1079*38fd1498Szrj 
1080*38fd1498Szrj /* Default weakness of a speculative dependence.  Used when we can say
1081*38fd1498Szrj    neither good nor bad about the dependence.  */
1082*38fd1498Szrj #define UNCERTAIN_DEP_WEAK (MAX_DEP_WEAK - MAX_DEP_WEAK / 4)
1083*38fd1498Szrj 
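/* A worked example of the constants above, assuming BITS_PER_DEP_STATUS is
   32 so that BITS_PER_DEP_WEAK is 6:

	DEP_WEAK_MASK      = 0x3f = 63
	MAX_DEP_WEAK       = 62
	MIN_DEP_WEAK       = 1
	NO_DEP_WEAK        = 63
	UNCERTAIN_DEP_WEAK = 62 - 62 / 4 = 47

   The hypothetical helper below turns a weakness value into an approximate
   percentage; it is only an illustration, the scheduler itself works with
   the raw values.  */
static inline int
example_dep_weak_to_percent (dw_t weak)
{
  return (int) (weak * 100 / NO_DEP_WEAK);
}
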
1084*38fd1498Szrj /* Offset for speculative weaknesses in dep_status.  */
1085*38fd1498Szrj enum SPEC_TYPES_OFFSETS {
1086*38fd1498Szrj   BEGIN_DATA_BITS_OFFSET = 0,
1087*38fd1498Szrj   BE_IN_DATA_BITS_OFFSET = BEGIN_DATA_BITS_OFFSET + BITS_PER_DEP_WEAK,
1088*38fd1498Szrj   BEGIN_CONTROL_BITS_OFFSET = BE_IN_DATA_BITS_OFFSET + BITS_PER_DEP_WEAK,
1089*38fd1498Szrj   BE_IN_CONTROL_BITS_OFFSET = BEGIN_CONTROL_BITS_OFFSET + BITS_PER_DEP_WEAK
1090*38fd1498Szrj };
1091*38fd1498Szrj 
1092*38fd1498Szrj /* The following defines provide numerous constants used to distinguish
1093*38fd1498Szrj    between different types of speculative dependencies.  They are also
1094*38fd1498Szrj    used as masks to clear/preserve the bits corresponding to the type
1095*38fd1498Szrj    of dependency weakness.  */
1096*38fd1498Szrj 
1097*38fd1498Szrj /* Dependence can be overcome with generation of new data speculative
1098*38fd1498Szrj    instruction.  */
1099*38fd1498Szrj #define BEGIN_DATA (((ds_t) DEP_WEAK_MASK) << BEGIN_DATA_BITS_OFFSET)
1100*38fd1498Szrj 
1101*38fd1498Szrj /* This dependence is to an instruction in the recovery block that was
1102*38fd1498Szrj    formed to recover after a data-speculation failure.
1103*38fd1498Szrj    Thus, this dependence can be overcome by generating a copy of
1104*38fd1498Szrj    this instruction in the recovery block.  */
1105*38fd1498Szrj #define BE_IN_DATA (((ds_t) DEP_WEAK_MASK) << BE_IN_DATA_BITS_OFFSET)
1106*38fd1498Szrj 
1107*38fd1498Szrj /* Dependence can be overcome with generation of new control speculative
1108*38fd1498Szrj    instruction.  */
1109*38fd1498Szrj #define BEGIN_CONTROL (((ds_t) DEP_WEAK_MASK) << BEGIN_CONTROL_BITS_OFFSET)
1110*38fd1498Szrj 
1111*38fd1498Szrj /* This dependence is to an instruction in the recovery block that was
1112*38fd1498Szrj    formed to recover after a control-speculation failure.
1113*38fd1498Szrj    Thus, this dependence can be overcome by generating a copy of
1114*38fd1498Szrj    this instruction in the recovery block.  */
1115*38fd1498Szrj #define BE_IN_CONTROL (((ds_t) DEP_WEAK_MASK) << BE_IN_CONTROL_BITS_OFFSET)
1116*38fd1498Szrj 
1117*38fd1498Szrj /* A few convenient combinations.  */
1118*38fd1498Szrj #define BEGIN_SPEC (BEGIN_DATA | BEGIN_CONTROL)
1119*38fd1498Szrj #define DATA_SPEC (BEGIN_DATA | BE_IN_DATA)
1120*38fd1498Szrj #define CONTROL_SPEC (BEGIN_CONTROL | BE_IN_CONTROL)
1121*38fd1498Szrj #define SPECULATIVE (DATA_SPEC | CONTROL_SPEC)
1122*38fd1498Szrj #define BE_IN_SPEC (BE_IN_DATA | BE_IN_CONTROL)
1123*38fd1498Szrj 
1124*38fd1498Szrj /* Constants that are helpful for iterating through dep_status.  */
1125*38fd1498Szrj #define FIRST_SPEC_TYPE BEGIN_DATA
1126*38fd1498Szrj #define LAST_SPEC_TYPE BE_IN_CONTROL
1127*38fd1498Szrj #define SPEC_TYPE_SHIFT BITS_PER_DEP_WEAK
1128*38fd1498Szrj 
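/* A sketch of walking the four speculative weakness fields with
   FIRST_SPEC_TYPE, LAST_SPEC_TYPE and SPEC_TYPE_SHIFT: it returns the
   largest weakness present in DS.  The helper name and the manual bit
   extraction are illustrative only; real code uses get_dep_weak () and
   ds_get_max_dep_weak () from sched-deps.c.  */
static inline dw_t
example_max_spec_weakness (ds_t ds)
{
  dw_t best = 0;
  ds_t mask = FIRST_SPEC_TYPE;
  int offset = BEGIN_DATA_BITS_OFFSET;

  while (1)
    {
      if (ds & mask)
        {
          dw_t w = (dw_t) ((ds & mask) >> offset);
          if (w > best)
            best = w;
        }

      if (mask == LAST_SPEC_TYPE)
        break;

      /* Move to the mask and bit offset of the next weakness field.  */
      mask <<= SPEC_TYPE_SHIFT;
      offset += SPEC_TYPE_SHIFT;
    }

  return best;
}
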
1129*38fd1498Szrj /* A dependence on an instruction can be of multiple types
1130*38fd1498Szrj    (e.g. true and output).  These fields enhance the REG_NOTE_KIND
1131*38fd1498Szrj    information of the dependence.  */
1132*38fd1498Szrj #define DEP_TRUE (((ds_t) 1) << (BE_IN_CONTROL_BITS_OFFSET + BITS_PER_DEP_WEAK))
1133*38fd1498Szrj #define DEP_OUTPUT (DEP_TRUE << 1)
1134*38fd1498Szrj #define DEP_ANTI (DEP_OUTPUT << 1)
1135*38fd1498Szrj #define DEP_CONTROL (DEP_ANTI << 1)
1136*38fd1498Szrj 
1137*38fd1498Szrj #define DEP_TYPES (DEP_TRUE | DEP_OUTPUT | DEP_ANTI | DEP_CONTROL)
1138*38fd1498Szrj 
1139*38fd1498Szrj /* Instruction has non-speculative dependence.  This bit represents the
1140*38fd1498Szrj    property of an instruction - not the one of a dependence.
1141*38fd1498Szrj    Therefore, it can appear only in the TODO_SPEC field of an instruction.  */
1142*38fd1498Szrj #define HARD_DEP (DEP_CONTROL << 1)
1143*38fd1498Szrj 
1144*38fd1498Szrj /* Like HARD_DEP, but dependencies can perhaps be broken by modifying
1145*38fd1498Szrj    the instructions.  This is used for example to change:
1146*38fd1498Szrj 
1147*38fd1498Szrj    rn++		=>	rm=[rn + 4]
1148*38fd1498Szrj    rm=[rn]		rn++
1149*38fd1498Szrj 
1150*38fd1498Szrj    For instructions that have this bit set, one of the dependencies of
1151*38fd1498Szrj    the instructions will have a non-NULL REPLACE field in its DEP_T.
1152*38fd1498Szrj    Just like HARD_DEP, this bit is only ever set in TODO_SPEC.  */
1153*38fd1498Szrj #define DEP_POSTPONED (HARD_DEP << 1)
1154*38fd1498Szrj 
1155*38fd1498Szrj /* Set if a dependency is cancelled via speculation.  */
1156*38fd1498Szrj #define DEP_CANCELLED (DEP_POSTPONED << 1)
1157*38fd1498Szrj 
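/* An illustrative sketch of composing a dep status from the pieces above:
   a true dependence that is data speculative (BEGIN_DATA) with weakness W,
   where W is assumed to lie in [MIN_DEP_WEAK, MAX_DEP_WEAK].  Real code
   uses set_dep_weak () from sched-deps.c; the helper name is hypothetical.  */
static inline ds_t
example_begin_data_true_dep (dw_t w)
{
  gcc_assert (w >= MIN_DEP_WEAK && w <= MAX_DEP_WEAK);
  return DEP_TRUE | (((ds_t) w) << BEGIN_DATA_BITS_OFFSET);
}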
1158*38fd1498Szrj 
1159*38fd1498Szrj /* This represents the results of calling sched-deps.c functions,
1160*38fd1498Szrj    which modify dependencies.  */
1161*38fd1498Szrj enum DEPS_ADJUST_RESULT {
1162*38fd1498Szrj   /* No dependence needed (e.g. producer == consumer).  */
1163*38fd1498Szrj   DEP_NODEP,
1164*38fd1498Szrj   /* Dependence is already present and wasn't modified.  */
1165*38fd1498Szrj   DEP_PRESENT,
1166*38fd1498Szrj   /* Existing dependence was modified to include additional information.  */
1167*38fd1498Szrj   DEP_CHANGED,
1168*38fd1498Szrj   /* New dependence has been created.  */
1169*38fd1498Szrj   DEP_CREATED
1170*38fd1498Szrj };
1171*38fd1498Szrj 
1172*38fd1498Szrj /* Represents the bits that can be set in the flags field of the
1173*38fd1498Szrj    sched_info structure.  */
1174*38fd1498Szrj enum SCHED_FLAGS {
1175*38fd1498Szrj   /* If set, generate links between instructions as DEPS_LIST.
1176*38fd1498Szrj      Otherwise, generate usual INSN_LIST links.  */
1177*38fd1498Szrj   USE_DEPS_LIST = 1,
1178*38fd1498Szrj   /* Perform data or control (or both) speculation.
1179*38fd1498Szrj      Results in generation of data and control speculative dependencies.
1180*38fd1498Szrj      Requires USE_DEPS_LIST set.  */
1181*38fd1498Szrj   DO_SPECULATION = USE_DEPS_LIST << 1,
1182*38fd1498Szrj   DO_BACKTRACKING = DO_SPECULATION << 1,
1183*38fd1498Szrj   DO_PREDICATION = DO_BACKTRACKING << 1,
1184*38fd1498Szrj   DONT_BREAK_DEPENDENCIES = DO_PREDICATION << 1,
1185*38fd1498Szrj   SCHED_RGN = DONT_BREAK_DEPENDENCIES << 1,
1186*38fd1498Szrj   SCHED_EBB = SCHED_RGN << 1,
1187*38fd1498Szrj   /* Scheduler can possibly create new basic blocks.  Used for assertions.  */
1188*38fd1498Szrj   NEW_BBS = SCHED_EBB << 1,
1189*38fd1498Szrj   SEL_SCHED = NEW_BBS << 1
1190*38fd1498Szrj };
1191*38fd1498Szrj 
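/* A minimal sketch of testing the flag bits above.  The parameter stands
   for the flags word of a scheduler pass (for instance the flags field of
   the current sched_info); the helper name is hypothetical.  */
static inline bool
example_pass_does_speculation (unsigned int flags)
{
  /* As documented above, DO_SPECULATION requires USE_DEPS_LIST.  */
  return (flags & DO_SPECULATION) != 0 && (flags & USE_DEPS_LIST) != 0;
}
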
1192*38fd1498Szrj enum SPEC_SCHED_FLAGS {
1193*38fd1498Szrj   COUNT_SPEC_IN_CRITICAL_PATH = 1,
1194*38fd1498Szrj   SEL_SCHED_SPEC_DONT_CHECK_CONTROL = COUNT_SPEC_IN_CRITICAL_PATH << 1
1195*38fd1498Szrj };
1196*38fd1498Szrj 
1197*38fd1498Szrj #define NOTE_NOT_BB_P(NOTE) (NOTE_P (NOTE) && (NOTE_KIND (NOTE)	\
1198*38fd1498Szrj 					       != NOTE_INSN_BASIC_BLOCK))
1199*38fd1498Szrj 
1200*38fd1498Szrj extern FILE *sched_dump;
1201*38fd1498Szrj extern int sched_verbose;
1202*38fd1498Szrj 
1203*38fd1498Szrj extern spec_info_t spec_info;
1204*38fd1498Szrj extern bool haifa_recovery_bb_ever_added_p;
1205*38fd1498Szrj 
1206*38fd1498Szrj /* Exception Free Loads:
1207*38fd1498Szrj 
1208*38fd1498Szrj    We define five classes of speculative loads: IFREE, IRISKY,
1209*38fd1498Szrj    PFREE, PRISKY, and MFREE.
1210*38fd1498Szrj 
1211*38fd1498Szrj    IFREE loads are loads that are proved to be exception-free, just
1212*38fd1498Szrj    by examining the load insn.  Examples for such loads are loads
1213*38fd1498Szrj    from TOC and loads of global data.
1214*38fd1498Szrj 
1215*38fd1498Szrj    IRISKY loads are loads that are proved to be exception-risky,
1216*38fd1498Szrj    just by examining the load insn.  Examples for such loads are
1217*38fd1498Szrj    volatile loads and loads from shared memory.
1218*38fd1498Szrj 
1219*38fd1498Szrj    PFREE loads are loads for which we can prove, by examining other
1220*38fd1498Szrj    insns, that they are exception-free.  Currently, this class consists
1221*38fd1498Szrj    of loads for which we are able to find a "similar load", either in
1222*38fd1498Szrj    the target block, or, if only one split-block exists, in that split
1223*38fd1498Szrj    block.  Load2 is similar to load1 if both have same single base
1224*38fd1498Szrj    register.  We identify only part of the similar loads, by finding
1225*38fd1498Szrj    an insn upon which both load1 and load2 have a DEF-USE dependence.
1226*38fd1498Szrj 
1227*38fd1498Szrj    PRISKY loads are loads for which we can prove, by examining other
1228*38fd1498Szrj    insns, that they are exception-risky.  Currently we have two proofs for
1229*38fd1498Szrj    such loads.  The first proof detects loads that are probably guarded by a
1230*38fd1498Szrj    test on the memory address.  This proof is based on the
1231*38fd1498Szrj    backward and forward data dependence information for the region.
1232*38fd1498Szrj    Let load-insn be the examined load.
1233*38fd1498Szrj    Load-insn is PRISKY iff ALL the following hold:
1234*38fd1498Szrj 
1235*38fd1498Szrj    - insn1 is not in the same block as load-insn
1236*38fd1498Szrj    - there is a DEF-USE dependence chain (insn1, ..., load-insn)
1237*38fd1498Szrj    - test-insn is either a compare or a branch, not in the same block
1238*38fd1498Szrj      as load-insn
1239*38fd1498Szrj    - load-insn is reachable from test-insn
1240*38fd1498Szrj    - there is a DEF-USE dependence chain (insn1, ..., test-insn)
1241*38fd1498Szrj 
1242*38fd1498Szrj    This proof might fail when the compare and the load are fed
1243*38fd1498Szrj    by an insn not in the region.  To solve this, we will add to this
1244*38fd1498Szrj    group all loads that have no input DEF-USE dependence.
1245*38fd1498Szrj 
1246*38fd1498Szrj    The second proof detects loads that are directly or indirectly
1247*38fd1498Szrj    fed by a speculative load.  This proof is affected by the
1248*38fd1498Szrj    scheduling process.  We will use the flag  fed_by_spec_load.
1249*38fd1498Szrj    Initially, all insns have this flag reset.  After a speculative
1250*38fd1498Szrj    motion of an insn, if insn is either a load, or marked as
1251*38fd1498Szrj    fed_by_spec_load, we will also mark as fed_by_spec_load every
1252*38fd1498Szrj    insn1 for which a DEF-USE dependence (insn, insn1) exists.  A
1253*38fd1498Szrj    load which is fed_by_spec_load is also PRISKY.
1254*38fd1498Szrj 
1255*38fd1498Szrj    MFREE (maybe-free) loads are all the remaining loads. They may be
1256*38fd1498Szrj    exception-free, but we cannot prove it.
1257*38fd1498Szrj 
1258*38fd1498Szrj    Now, all loads in IFREE and PFREE classes are considered
1259*38fd1498Szrj    exception-free, while all loads in IRISKY and PRISKY classes are
1260*38fd1498Szrj    considered exception-risky.  As for loads in the MFREE class,
1261*38fd1498Szrj    these are considered either exception-free or exception-risky,
1262*38fd1498Szrj    depending on whether we are pessimistic or optimistic.  We have
1263*38fd1498Szrj    to take the pessimistic approach to assure the safety of
1264*38fd1498Szrj    speculative scheduling, but we can take the optimistic approach
1265*38fd1498Szrj    by invoking the -fsched-spec-load-dangerous option.  */
1266*38fd1498Szrj 
1267*38fd1498Szrj enum INSN_TRAP_CLASS
1268*38fd1498Szrj {
1269*38fd1498Szrj   TRAP_FREE = 0, IFREE = 1, PFREE_CANDIDATE = 2,
1270*38fd1498Szrj   PRISKY_CANDIDATE = 3, IRISKY = 4, TRAP_RISKY = 5
1271*38fd1498Szrj };
1272*38fd1498Szrj 
1273*38fd1498Szrj #define WORST_CLASS(class1, class2) \
1274*38fd1498Szrj ((class1 > class2) ? class1 : class2)
1275*38fd1498Szrj 
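/* A usage sketch for WORST_CLASS: combine the trap classes of two insns.
   The enumeration above is ordered so that larger values are riskier, so
   taking the larger value picks the worse class.  The helper name is
   hypothetical.  */
static inline enum INSN_TRAP_CLASS
example_combine_trap_class (enum INSN_TRAP_CLASS c1, enum INSN_TRAP_CLASS c2)
{
  return WORST_CLASS (c1, c2);
}
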
1276*38fd1498Szrj #ifndef __GNUC__
1277*38fd1498Szrj #define __inline
1278*38fd1498Szrj #endif
1279*38fd1498Szrj 
1280*38fd1498Szrj #ifndef HAIFA_INLINE
1281*38fd1498Szrj #define HAIFA_INLINE __inline
1282*38fd1498Szrj #endif
1283*38fd1498Szrj 
1284*38fd1498Szrj struct sched_deps_info_def
1285*38fd1498Szrj {
1286*38fd1498Szrj   /* Called when computing dependencies for a JUMP_INSN.  This function
1287*38fd1498Szrj      should store the set of registers that must be considered as set by
1288*38fd1498Szrj      the jump in the regset.  */
1289*38fd1498Szrj   void (*compute_jump_reg_dependencies) (rtx, regset);
1290*38fd1498Szrj 
1291*38fd1498Szrj   /* Start analyzing insn.  */
1292*38fd1498Szrj   void (*start_insn) (rtx_insn *);
1293*38fd1498Szrj 
1294*38fd1498Szrj   /* Finish analyzing insn.  */
1295*38fd1498Szrj   void (*finish_insn) (void);
1296*38fd1498Szrj 
1297*38fd1498Szrj   /* Start analyzing insn LHS (Left Hand Side).  */
1298*38fd1498Szrj   void (*start_lhs) (rtx);
1299*38fd1498Szrj 
1300*38fd1498Szrj   /* Finish analyzing insn LHS.  */
1301*38fd1498Szrj   void (*finish_lhs) (void);
1302*38fd1498Szrj 
1303*38fd1498Szrj   /* Start analyzing insn RHS (Right Hand Side).  */
1304*38fd1498Szrj   void (*start_rhs) (rtx);
1305*38fd1498Szrj 
1306*38fd1498Szrj   /* Finish analyzing insn RHS.  */
1307*38fd1498Szrj   void (*finish_rhs) (void);
1308*38fd1498Szrj 
1309*38fd1498Szrj   /* Note set of the register.  */
1310*38fd1498Szrj   void (*note_reg_set) (int);
1311*38fd1498Szrj 
1312*38fd1498Szrj   /* Note clobber of the register.  */
1313*38fd1498Szrj   void (*note_reg_clobber) (int);
1314*38fd1498Szrj 
1315*38fd1498Szrj   /* Note use of the register.  */
1316*38fd1498Szrj   void (*note_reg_use) (int);
1317*38fd1498Szrj 
1318*38fd1498Szrj   /* Note a memory dependence of type DS between MEM1 and MEM2 (the
1319*38fd1498Szrj      latter of which is in INSN2).  */
1320*38fd1498Szrj   void (*note_mem_dep) (rtx mem1, rtx mem2, rtx_insn *insn2, ds_t ds);
1321*38fd1498Szrj 
1322*38fd1498Szrj   /* Note a dependence of type DS from the INSN.  */
1323*38fd1498Szrj   void (*note_dep) (rtx_insn *, ds_t ds);
1324*38fd1498Szrj 
1325*38fd1498Szrj   /* Nonzero if we should use cselib for better alias analysis.  This
1326*38fd1498Szrj      must be 0 if the dependency information is used after sched_analyze
1327*38fd1498Szrj      has completed, e.g. if we're using it to initialize state for successor
1328*38fd1498Szrj      blocks in region scheduling.  */
1329*38fd1498Szrj   unsigned int use_cselib : 1;
1330*38fd1498Szrj 
1331*38fd1498Szrj   /* If set, generate links between instructions as DEPS_LIST.
1332*38fd1498Szrj      Otherwise, generate usual INSN_LIST links.  */
1333*38fd1498Szrj   unsigned int use_deps_list : 1;
1334*38fd1498Szrj 
1335*38fd1498Szrj   /* Generate data and control speculative dependencies.
1336*38fd1498Szrj      Requires USE_DEPS_LIST set.  */
1337*38fd1498Szrj   unsigned int generate_spec_deps : 1;
1338*38fd1498Szrj };
1339*38fd1498Szrj 
1340*38fd1498Szrj extern struct sched_deps_info_def *sched_deps_info;
1341*38fd1498Szrj 
1342*38fd1498Szrj 
1343*38fd1498Szrj /* Functions in sched-deps.c.  */
1344*38fd1498Szrj extern rtx sched_get_reverse_condition_uncached (const rtx_insn *);
1345*38fd1498Szrj extern bool sched_insns_conditions_mutex_p (const rtx_insn *,
1346*38fd1498Szrj 					    const rtx_insn *);
1347*38fd1498Szrj extern bool sched_insn_is_legitimate_for_speculation_p (const rtx_insn *, ds_t);
1348*38fd1498Szrj extern void add_dependence (rtx_insn *, rtx_insn *, enum reg_note);
1349*38fd1498Szrj extern void sched_analyze (struct deps_desc *, rtx_insn *, rtx_insn *);
1350*38fd1498Szrj extern void init_deps (struct deps_desc *, bool);
1351*38fd1498Szrj extern void init_deps_reg_last (struct deps_desc *);
1352*38fd1498Szrj extern void free_deps (struct deps_desc *);
1353*38fd1498Szrj extern void init_deps_global (void);
1354*38fd1498Szrj extern void finish_deps_global (void);
1355*38fd1498Szrj extern void deps_analyze_insn (struct deps_desc *, rtx_insn *);
1356*38fd1498Szrj extern void remove_from_deps (struct deps_desc *, rtx_insn *);
1357*38fd1498Szrj extern void init_insn_reg_pressure_info (rtx_insn *);
1358*38fd1498Szrj extern void get_implicit_reg_pending_clobbers (HARD_REG_SET *, rtx_insn *);
1359*38fd1498Szrj 
1360*38fd1498Szrj extern dw_t get_dep_weak (ds_t, ds_t);
1361*38fd1498Szrj extern ds_t set_dep_weak (ds_t, ds_t, dw_t);
1362*38fd1498Szrj extern dw_t estimate_dep_weak (rtx, rtx);
1363*38fd1498Szrj extern ds_t ds_merge (ds_t, ds_t);
1364*38fd1498Szrj extern ds_t ds_full_merge (ds_t, ds_t, rtx, rtx);
1365*38fd1498Szrj extern ds_t ds_max_merge (ds_t, ds_t);
1366*38fd1498Szrj extern dw_t ds_weak (ds_t);
1367*38fd1498Szrj extern ds_t ds_get_speculation_types (ds_t);
1368*38fd1498Szrj extern ds_t ds_get_max_dep_weak (ds_t);
1369*38fd1498Szrj 
1370*38fd1498Szrj extern void sched_deps_init (bool);
1371*38fd1498Szrj extern void sched_deps_finish (void);
1372*38fd1498Szrj 
1373*38fd1498Szrj extern void haifa_note_reg_set (int);
1374*38fd1498Szrj extern void haifa_note_reg_clobber (int);
1375*38fd1498Szrj extern void haifa_note_reg_use (int);
1376*38fd1498Szrj 
1377*38fd1498Szrj extern void maybe_extend_reg_info_p (void);
1378*38fd1498Szrj 
1379*38fd1498Szrj extern void deps_start_bb (struct deps_desc *, rtx_insn *);
1380*38fd1498Szrj extern enum reg_note ds_to_dt (ds_t);
1381*38fd1498Szrj 
1382*38fd1498Szrj extern bool deps_pools_are_empty_p (void);
1383*38fd1498Szrj extern void sched_free_deps (rtx_insn *, rtx_insn *, bool);
1384*38fd1498Szrj extern void extend_dependency_caches (int, bool);
1385*38fd1498Szrj 
1386*38fd1498Szrj extern void debug_ds (ds_t);
1387*38fd1498Szrj 
1388*38fd1498Szrj 
1389*38fd1498Szrj /* Functions in haifa-sched.c.  */
1390*38fd1498Szrj extern void initialize_live_range_shrinkage (void);
1391*38fd1498Szrj extern void finish_live_range_shrinkage (void);
1392*38fd1498Szrj extern void sched_init_region_reg_pressure_info (void);
1393*38fd1498Szrj extern void free_global_sched_pressure_data (void);
1394*38fd1498Szrj extern int haifa_classify_insn (const_rtx);
1395*38fd1498Szrj extern void get_ebb_head_tail (basic_block, basic_block,
1396*38fd1498Szrj 			       rtx_insn **, rtx_insn **);
1397*38fd1498Szrj extern int no_real_insns_p (const rtx_insn *, const rtx_insn *);
1398*38fd1498Szrj 
1399*38fd1498Szrj extern int insn_sched_cost (rtx_insn *);
1400*38fd1498Szrj extern int dep_cost_1 (dep_t, dw_t);
1401*38fd1498Szrj extern int dep_cost (dep_t);
1402*38fd1498Szrj extern int set_priorities (rtx_insn *, rtx_insn *);
1403*38fd1498Szrj 
1404*38fd1498Szrj extern void sched_setup_bb_reg_pressure_info (basic_block, rtx_insn *);
1405*38fd1498Szrj extern bool schedule_block (basic_block *, state_t);
1406*38fd1498Szrj 
1407*38fd1498Szrj extern int cycle_issued_insns;
1408*38fd1498Szrj extern int issue_rate;
1409*38fd1498Szrj extern int dfa_lookahead;
1410*38fd1498Szrj 
1411*38fd1498Szrj extern int autopref_multipass_dfa_lookahead_guard (rtx_insn *, int);
1412*38fd1498Szrj 
1413*38fd1498Szrj extern rtx_insn *ready_element (struct ready_list *, int);
1414*38fd1498Szrj extern rtx_insn **ready_lastpos (struct ready_list *);
1415*38fd1498Szrj 
1416*38fd1498Szrj extern int try_ready (rtx_insn *);
1417*38fd1498Szrj extern void sched_extend_ready_list (int);
1418*38fd1498Szrj extern void sched_finish_ready_list (void);
1419*38fd1498Szrj extern void sched_change_pattern (rtx, rtx);
1420*38fd1498Szrj extern int sched_speculate_insn (rtx_insn *, ds_t, rtx *);
1421*38fd1498Szrj extern void unlink_bb_notes (basic_block, basic_block);
1422*38fd1498Szrj extern void add_block (basic_block, basic_block);
1423*38fd1498Szrj extern rtx_note *bb_note (basic_block);
1424*38fd1498Szrj extern void concat_note_lists (rtx_insn *, rtx_insn **);
1425*38fd1498Szrj extern rtx_insn *sched_emit_insn (rtx);
1426*38fd1498Szrj extern rtx_insn *get_ready_element (int);
1427*38fd1498Szrj extern int number_in_ready (void);
1428*38fd1498Szrj 
1429*38fd1498Szrj /* Types and functions in sched-ebb.c.  */
1430*38fd1498Szrj 
1431*38fd1498Szrj extern basic_block schedule_ebb (rtx_insn *, rtx_insn *, bool);
1432*38fd1498Szrj extern void schedule_ebbs_init (void);
1433*38fd1498Szrj extern void schedule_ebbs_finish (void);
1434*38fd1498Szrj 
1435*38fd1498Szrj /* Types and functions in sched-rgn.c.  */
1436*38fd1498Szrj 
1437*38fd1498Szrj /* A region is the main entity for interblock scheduling: insns
1438*38fd1498Szrj    are allowed to move between blocks in the same region, along
1439*38fd1498Szrj    control flow graph edges, in the 'up' direction.  */
1440*38fd1498Szrj struct region
1441*38fd1498Szrj {
1442*38fd1498Szrj   /* Number of extended basic blocks in region.  */
1443*38fd1498Szrj   int rgn_nr_blocks;
1444*38fd1498Szrj   /* First block of the region (actually an index into rgn_bb_table).  */
1445*38fd1498Szrj   int rgn_blocks;
1446*38fd1498Szrj   /* Dependencies for this region are already computed.  Basically, this
1447*38fd1498Szrj      indicates that the region is a recovery block.  */
1448*38fd1498Szrj   unsigned int dont_calc_deps : 1;
1449*38fd1498Szrj   /* This region has at least one non-trivial ebb.  */
1450*38fd1498Szrj   unsigned int has_real_ebb : 1;
1451*38fd1498Szrj };
1452*38fd1498Szrj 
1453*38fd1498Szrj extern int nr_regions;
1454*38fd1498Szrj extern region *rgn_table;
1455*38fd1498Szrj extern int *rgn_bb_table;
1456*38fd1498Szrj extern int *block_to_bb;
1457*38fd1498Szrj extern int *containing_rgn;
1458*38fd1498Szrj 
1459*38fd1498Szrj /* Often used short-hand in the scheduler.  The rest of the compiler uses
1460*38fd1498Szrj    BLOCK_FOR_INSN(INSN) and an indirect reference to get the basic block
1461*38fd1498Szrj    number ("index").  For historical reasons, the scheduler does not.  */
1462*38fd1498Szrj #define BLOCK_NUM(INSN)	      (BLOCK_FOR_INSN (INSN)->index + 0)
1463*38fd1498Szrj 
1464*38fd1498Szrj #define RGN_NR_BLOCKS(rgn) (rgn_table[rgn].rgn_nr_blocks)
1465*38fd1498Szrj #define RGN_BLOCKS(rgn) (rgn_table[rgn].rgn_blocks)
1466*38fd1498Szrj #define RGN_DONT_CALC_DEPS(rgn) (rgn_table[rgn].dont_calc_deps)
1467*38fd1498Szrj #define RGN_HAS_REAL_EBB(rgn) (rgn_table[rgn].has_real_ebb)
1468*38fd1498Szrj #define BLOCK_TO_BB(block) (block_to_bb[block])
1469*38fd1498Szrj #define CONTAINING_RGN(block) (containing_rgn[block])
1470*38fd1498Szrj 
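/* A usage sketch for the tables above (hypothetical helper): fetch the
   index of the I-th basic block of region RGN.  RGN_BLOCKS gives the
   position where the region's blocks start inside rgn_bb_table, as
   described in struct region.  */
static inline int
example_rgn_nth_block (int rgn, int i)
{
  gcc_assert (i >= 0 && i < RGN_NR_BLOCKS (rgn));
  return rgn_bb_table[RGN_BLOCKS (rgn) + i];
}
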
1471*38fd1498Szrj /* The mapping from ebb to block.  */
1472*38fd1498Szrj extern int *ebb_head;
1473*38fd1498Szrj #define BB_TO_BLOCK(ebb) (rgn_bb_table[ebb_head[ebb]])
1474*38fd1498Szrj #define EBB_FIRST_BB(ebb) BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (ebb))
1475*38fd1498Szrj #define EBB_LAST_BB(ebb) \
1476*38fd1498Szrj   BASIC_BLOCK_FOR_FN (cfun, rgn_bb_table[ebb_head[ebb + 1] - 1])
1477*38fd1498Szrj #define INSN_BB(INSN) (BLOCK_TO_BB (BLOCK_NUM (INSN)))
1478*38fd1498Szrj 
1479*38fd1498Szrj extern int current_nr_blocks;
1480*38fd1498Szrj extern int current_blocks;
1481*38fd1498Szrj extern int target_bb;
1482*38fd1498Szrj extern bool sched_no_dce;
1483*38fd1498Szrj 
1484*38fd1498Szrj extern void set_modulo_params (int, int, int, int);
1485*38fd1498Szrj extern void record_delay_slot_pair (rtx_insn *, rtx_insn *, int, int);
1486*38fd1498Szrj extern rtx_insn *real_insn_for_shadow (rtx_insn *);
1487*38fd1498Szrj extern void discard_delay_pairs_above (int);
1488*38fd1498Szrj extern void free_delay_pairs (void);
1489*38fd1498Szrj extern void add_delay_dependencies (rtx_insn *);
1490*38fd1498Szrj extern bool sched_is_disabled_for_current_region_p (void);
1491*38fd1498Szrj extern void sched_rgn_init (bool);
1492*38fd1498Szrj extern void sched_rgn_finish (void);
1493*38fd1498Szrj extern void rgn_setup_region (int);
1494*38fd1498Szrj extern void sched_rgn_compute_dependencies (int);
1495*38fd1498Szrj extern void sched_rgn_local_init (int);
1496*38fd1498Szrj extern void sched_rgn_local_finish (void);
1497*38fd1498Szrj extern void sched_rgn_local_free (void);
1498*38fd1498Szrj extern void extend_regions (void);
1499*38fd1498Szrj extern void rgn_make_new_region_out_of_new_block (basic_block);
1500*38fd1498Szrj 
1501*38fd1498Szrj extern void compute_priorities (void);
1502*38fd1498Szrj extern void increase_insn_priority (rtx_insn *, int);
1503*38fd1498Szrj extern void debug_rgn_dependencies (int);
1504*38fd1498Szrj extern void debug_dependencies (rtx_insn *, rtx_insn *);
1505*38fd1498Szrj extern void dump_rgn_dependencies_dot (FILE *);
1506*38fd1498Szrj extern void dump_rgn_dependencies_dot (const char *);
1507*38fd1498Szrj 
1508*38fd1498Szrj extern void free_rgn_deps (void);
1509*38fd1498Szrj extern int contributes_to_priority (rtx_insn *, rtx_insn *);
1510*38fd1498Szrj extern void extend_rgns (int *, int *, sbitmap, int *);
1511*38fd1498Szrj extern void deps_join (struct deps_desc *, struct deps_desc *);
1512*38fd1498Szrj 
1513*38fd1498Szrj extern void rgn_setup_common_sched_info (void);
1514*38fd1498Szrj extern void rgn_setup_sched_infos (void);
1515*38fd1498Szrj 
1516*38fd1498Szrj extern void debug_regions (void);
1517*38fd1498Szrj extern void debug_region (int);
1518*38fd1498Szrj extern void dump_region_dot (FILE *, int);
1519*38fd1498Szrj extern void dump_region_dot_file (const char *, int);
1520*38fd1498Szrj 
1521*38fd1498Szrj extern void haifa_sched_init (void);
1522*38fd1498Szrj extern void haifa_sched_finish (void);
1523*38fd1498Szrj 
1524*38fd1498Szrj extern void find_modifiable_mems (rtx_insn *, rtx_insn *);
1525*38fd1498Szrj 
1526*38fd1498Szrj /* sched-deps.c interface to walk, add, search, update, resolve, delete
1527*38fd1498Szrj    and debug instruction dependencies.  */
1528*38fd1498Szrj 
1529*38fd1498Szrj /* Constants defining dependences lists.  */
1530*38fd1498Szrj 
1531*38fd1498Szrj /* No list.  */
1532*38fd1498Szrj #define SD_LIST_NONE (0)
1533*38fd1498Szrj 
1534*38fd1498Szrj /* hard_back_deps.  */
1535*38fd1498Szrj #define SD_LIST_HARD_BACK (1)
1536*38fd1498Szrj 
1537*38fd1498Szrj /* spec_back_deps.  */
1538*38fd1498Szrj #define SD_LIST_SPEC_BACK (2)
1539*38fd1498Szrj 
1540*38fd1498Szrj /* forw_deps.  */
1541*38fd1498Szrj #define SD_LIST_FORW (4)
1542*38fd1498Szrj 
1543*38fd1498Szrj /* resolved_back_deps.  */
1544*38fd1498Szrj #define SD_LIST_RES_BACK (8)
1545*38fd1498Szrj 
1546*38fd1498Szrj /* resolved_forw_deps.  */
1547*38fd1498Szrj #define SD_LIST_RES_FORW (16)
1548*38fd1498Szrj 
1549*38fd1498Szrj #define SD_LIST_BACK (SD_LIST_HARD_BACK | SD_LIST_SPEC_BACK)
1550*38fd1498Szrj 
1551*38fd1498Szrj /* A type to hold above flags.  */
1552*38fd1498Szrj typedef int sd_list_types_def;
1553*38fd1498Szrj 
1554*38fd1498Szrj extern void sd_next_list (const_rtx, sd_list_types_def *, deps_list_t *, bool *);
1555*38fd1498Szrj 
1556*38fd1498Szrj /* Iterator to walk through, resolve and delete dependencies.  */
1557*38fd1498Szrj struct _sd_iterator
1558*38fd1498Szrj {
1559*38fd1498Szrj   /* What lists to walk.  Can be any combination of SD_LIST_* flags.  */
1560*38fd1498Szrj   sd_list_types_def types;
1561*38fd1498Szrj 
1562*38fd1498Szrj   /* The instruction whose dependency lists will be walked.  */
1563*38fd1498Szrj   rtx insn;
1564*38fd1498Szrj 
1565*38fd1498Szrj   /* Pointer to the next field of the previous element.  This is not
1566*38fd1498Szrj      simply a pointer to the next element to allow easy deletion from the
1567*38fd1498Szrj      list.  When a dep is being removed from the list the iterator
1568*38fd1498Szrj      will automatically advance because the value in *linkp will start
1569*38fd1498Szrj      referring to the next element.  */
1570*38fd1498Szrj   dep_link_t *linkp;
1571*38fd1498Szrj 
1572*38fd1498Szrj   /* True if the current list is a resolved one.  */
1573*38fd1498Szrj   bool resolved_p;
1574*38fd1498Szrj };
1575*38fd1498Szrj 
1576*38fd1498Szrj typedef struct _sd_iterator sd_iterator_def;
1577*38fd1498Szrj 
1578*38fd1498Szrj /* ??? We could move some definitions that are used in the inline functions
1579*38fd1498Szrj    below out of sched-int.h to sched-deps.c, provided that the functions
1580*38fd1498Szrj    below become global externals.
1581*38fd1498Szrj    These definitions include:
1582*38fd1498Szrj    * struct _deps_list: opaque pointer is needed at global scope.
1583*38fd1498Szrj    * struct _dep_link: opaque pointer is needed at scope of sd_iterator_def.
1584*38fd1498Szrj    * struct _dep_node: opaque pointer is needed at scope of
1585*38fd1498Szrj    struct _deps_link.  */
1586*38fd1498Szrj 
1587*38fd1498Szrj /* Return initialized iterator.  */
1588*38fd1498Szrj static inline sd_iterator_def
1589*38fd1498Szrj sd_iterator_start (rtx insn, sd_list_types_def types)
1590*38fd1498Szrj {
1591*38fd1498Szrj   /* A dep_link that is NULL; a pointer to it dereferences to NULL.  */
1592*38fd1498Szrj   static dep_link_t null_link = NULL;
1593*38fd1498Szrj 
1594*38fd1498Szrj   sd_iterator_def i;
1595*38fd1498Szrj 
1596*38fd1498Szrj   i.types = types;
1597*38fd1498Szrj   i.insn = insn;
1598*38fd1498Szrj   i.linkp = &null_link;
1599*38fd1498Szrj 
1600*38fd1498Szrj   /* Avoid an 'uninitialized' warning.  */
1601*38fd1498Szrj   i.resolved_p = false;
1602*38fd1498Szrj 
1603*38fd1498Szrj   return i;
1604*38fd1498Szrj }
1605*38fd1498Szrj 
1606*38fd1498Szrj /* Return the current element.  */
1607*38fd1498Szrj static inline bool
1608*38fd1498Szrj sd_iterator_cond (sd_iterator_def *it_ptr, dep_t *dep_ptr)
1609*38fd1498Szrj {
1610*38fd1498Szrj   while (true)
1611*38fd1498Szrj     {
1612*38fd1498Szrj       dep_link_t link = *it_ptr->linkp;
1613*38fd1498Szrj 
1614*38fd1498Szrj       if (link != NULL)
1615*38fd1498Szrj 	{
1616*38fd1498Szrj 	  *dep_ptr = DEP_LINK_DEP (link);
1617*38fd1498Szrj 	  return true;
1618*38fd1498Szrj 	}
1619*38fd1498Szrj       else
1620*38fd1498Szrj 	{
1621*38fd1498Szrj 	  sd_list_types_def types = it_ptr->types;
1622*38fd1498Szrj 
1623*38fd1498Szrj 	  if (types != SD_LIST_NONE)
1624*38fd1498Szrj 	    /* Switch to next list.  */
1625*38fd1498Szrj 	    {
1626*38fd1498Szrj 	      deps_list_t list;
1627*38fd1498Szrj 
1628*38fd1498Szrj 	      sd_next_list (it_ptr->insn,
1629*38fd1498Szrj 			    &it_ptr->types, &list, &it_ptr->resolved_p);
1630*38fd1498Szrj 
1631*38fd1498Szrj 	      if (list)
1632*38fd1498Szrj 		{
1633*38fd1498Szrj 		  it_ptr->linkp = &DEPS_LIST_FIRST (list);
1634*38fd1498Szrj 		  continue;
1635*38fd1498Szrj 		}
1636*38fd1498Szrj 	    }
1637*38fd1498Szrj 
1638*38fd1498Szrj 	  *dep_ptr = NULL;
1639*38fd1498Szrj 	  return false;
1640*38fd1498Szrj 	}
1641*38fd1498Szrj    }
1642*38fd1498Szrj }
1643*38fd1498Szrj 
1644*38fd1498Szrj /* Advance iterator.  */
1645*38fd1498Szrj static inline void
1646*38fd1498Szrj sd_iterator_next (sd_iterator_def *it_ptr)
1647*38fd1498Szrj {
1648*38fd1498Szrj   it_ptr->linkp = &DEP_LINK_NEXT (*it_ptr->linkp);
1649*38fd1498Szrj }
1650*38fd1498Szrj 
1651*38fd1498Szrj /* A loop wrapper for walking an insn's dependence lists.  */
1652*38fd1498Szrj #define FOR_EACH_DEP(INSN, LIST_TYPES, ITER, DEP)		\
1653*38fd1498Szrj   for ((ITER) = sd_iterator_start ((INSN), (LIST_TYPES));	\
1654*38fd1498Szrj        sd_iterator_cond (&(ITER), &(DEP));			\
1655*38fd1498Szrj        sd_iterator_next (&(ITER)))
1656*38fd1498Szrj 
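/* A minimal usage sketch of the iterator machinery above: count how many
   hard backward dependencies of INSN are true dependencies.  The helper
   name is hypothetical; DEP_TYPE is the dep accessor defined earlier in
   this file.  */
static inline int
example_count_true_hard_back_deps (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int n = 0;

  FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
    if (DEP_TYPE (dep) == REG_DEP_TRUE)
      n++;

  return n;
}
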
1657*38fd1498Szrj #define IS_DISPATCH_ON 1
1658*38fd1498Szrj #define IS_CMP 2
1659*38fd1498Szrj #define DISPATCH_VIOLATION 3
1660*38fd1498Szrj #define FITS_DISPATCH_WINDOW 4
1661*38fd1498Szrj #define DISPATCH_INIT 5
1662*38fd1498Szrj #define ADD_TO_DISPATCH_WINDOW 6
1663*38fd1498Szrj 
1664*38fd1498Szrj extern int sd_lists_size (const_rtx, sd_list_types_def);
1665*38fd1498Szrj extern bool sd_lists_empty_p (const_rtx, sd_list_types_def);
1666*38fd1498Szrj extern void sd_init_insn (rtx_insn *);
1667*38fd1498Szrj extern void sd_finish_insn (rtx_insn *);
1668*38fd1498Szrj extern dep_t sd_find_dep_between (rtx, rtx, bool);
1669*38fd1498Szrj extern void sd_add_dep (dep_t, bool);
1670*38fd1498Szrj extern enum DEPS_ADJUST_RESULT sd_add_or_update_dep (dep_t, bool);
1671*38fd1498Szrj extern void sd_resolve_dep (sd_iterator_def);
1672*38fd1498Szrj extern void sd_unresolve_dep (sd_iterator_def);
1673*38fd1498Szrj extern void sd_copy_back_deps (rtx_insn *, rtx_insn *, bool);
1674*38fd1498Szrj extern void sd_delete_dep (sd_iterator_def);
1675*38fd1498Szrj extern void sd_debug_lists (rtx, sd_list_types_def);
1676*38fd1498Szrj 
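/* A sketch of reacting to the DEPS_ADJUST_RESULT returned by
   sd_add_or_update_dep (), passing false for its resolved-list flag as with
   sd_add_dep ().  DEP is assumed to be a fully initialized dependence; the
   wrapper name is hypothetical.  */
static inline bool
example_add_dep_was_new (dep_t dep)
{
  enum DEPS_ADJUST_RESULT res = sd_add_or_update_dep (dep, false);
  return res == DEP_CREATED;
}
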
1677*38fd1498Szrj /* Macros and declarations for scheduling fusion.  */
1678*38fd1498Szrj #define FUSION_MAX_PRIORITY (INT_MAX)
1679*38fd1498Szrj extern bool sched_fusion;
1680*38fd1498Szrj 
1681*38fd1498Szrj #endif /* INSN_SCHEDULING */
1682*38fd1498Szrj 
1683*38fd1498Szrj #endif /* GCC_SCHED_INT_H */
1684*38fd1498Szrj 