xref: /dragonfly/contrib/gcc-8.0/gcc/sched-deps.c (revision 38fd1498)
1*38fd1498Szrj /* Instruction scheduling pass.  This file computes dependencies between
2*38fd1498Szrj    instructions.
3*38fd1498Szrj    Copyright (C) 1992-2018 Free Software Foundation, Inc.
4*38fd1498Szrj    Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
5*38fd1498Szrj    and currently maintained by, Jim Wilson (wilson@cygnus.com)
6*38fd1498Szrj 
7*38fd1498Szrj This file is part of GCC.
8*38fd1498Szrj 
9*38fd1498Szrj GCC is free software; you can redistribute it and/or modify it under
10*38fd1498Szrj the terms of the GNU General Public License as published by the Free
11*38fd1498Szrj Software Foundation; either version 3, or (at your option) any later
12*38fd1498Szrj version.
13*38fd1498Szrj 
14*38fd1498Szrj GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15*38fd1498Szrj WARRANTY; without even the implied warranty of MERCHANTABILITY or
16*38fd1498Szrj FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
17*38fd1498Szrj for more details.
18*38fd1498Szrj 
19*38fd1498Szrj You should have received a copy of the GNU General Public License
20*38fd1498Szrj along with GCC; see the file COPYING3.  If not see
21*38fd1498Szrj <http://www.gnu.org/licenses/>.  */
22*38fd1498Szrj 
23*38fd1498Szrj #include "config.h"
24*38fd1498Szrj #include "system.h"
25*38fd1498Szrj #include "coretypes.h"
26*38fd1498Szrj #include "backend.h"
27*38fd1498Szrj #include "target.h"
28*38fd1498Szrj #include "rtl.h"
29*38fd1498Szrj #include "tree.h"
30*38fd1498Szrj #include "df.h"
31*38fd1498Szrj #include "insn-config.h"
32*38fd1498Szrj #include "regs.h"
33*38fd1498Szrj #include "memmodel.h"
34*38fd1498Szrj #include "ira.h"
35*38fd1498Szrj #include "ira-int.h"
36*38fd1498Szrj #include "insn-attr.h"
37*38fd1498Szrj #include "cfgbuild.h"
38*38fd1498Szrj #include "sched-int.h"
39*38fd1498Szrj #include "params.h"
40*38fd1498Szrj #include "cselib.h"
41*38fd1498Szrj 
42*38fd1498Szrj #ifdef INSN_SCHEDULING
43*38fd1498Szrj 
44*38fd1498Szrj /* Holds current parameters for the dependency analyzer.  */
45*38fd1498Szrj struct sched_deps_info_def *sched_deps_info;
46*38fd1498Szrj 
47*38fd1498Szrj /* The data is specific to the Haifa scheduler.  */
48*38fd1498Szrj vec<haifa_deps_insn_data_def>
49*38fd1498Szrj     h_d_i_d = vNULL;
50*38fd1498Szrj 
51*38fd1498Szrj /* Return the major type present in the DS.  */
52*38fd1498Szrj enum reg_note
ds_to_dk(ds_t ds)53*38fd1498Szrj ds_to_dk (ds_t ds)
54*38fd1498Szrj {
55*38fd1498Szrj   if (ds & DEP_TRUE)
56*38fd1498Szrj     return REG_DEP_TRUE;
57*38fd1498Szrj 
58*38fd1498Szrj   if (ds & DEP_OUTPUT)
59*38fd1498Szrj     return REG_DEP_OUTPUT;
60*38fd1498Szrj 
61*38fd1498Szrj   if (ds & DEP_CONTROL)
62*38fd1498Szrj     return REG_DEP_CONTROL;
63*38fd1498Szrj 
64*38fd1498Szrj   gcc_assert (ds & DEP_ANTI);
65*38fd1498Szrj 
66*38fd1498Szrj   return REG_DEP_ANTI;
67*38fd1498Szrj }
68*38fd1498Szrj 
69*38fd1498Szrj /* Return equivalent dep_status.  */
70*38fd1498Szrj ds_t
dk_to_ds(enum reg_note dk)71*38fd1498Szrj dk_to_ds (enum reg_note dk)
72*38fd1498Szrj {
73*38fd1498Szrj   switch (dk)
74*38fd1498Szrj     {
75*38fd1498Szrj     case REG_DEP_TRUE:
76*38fd1498Szrj       return DEP_TRUE;
77*38fd1498Szrj 
78*38fd1498Szrj     case REG_DEP_OUTPUT:
79*38fd1498Szrj       return DEP_OUTPUT;
80*38fd1498Szrj 
81*38fd1498Szrj     case REG_DEP_CONTROL:
82*38fd1498Szrj       return DEP_CONTROL;
83*38fd1498Szrj 
84*38fd1498Szrj     default:
85*38fd1498Szrj       gcc_assert (dk == REG_DEP_ANTI);
86*38fd1498Szrj       return DEP_ANTI;
87*38fd1498Szrj     }
88*38fd1498Szrj }
89*38fd1498Szrj 
90*38fd1498Szrj /* Functions to operate with dependence information container - dep_t.  */
91*38fd1498Szrj 
92*38fd1498Szrj /* Init DEP with the arguments.  */
93*38fd1498Szrj void
init_dep_1(dep_t dep,rtx_insn * pro,rtx_insn * con,enum reg_note type,ds_t ds)94*38fd1498Szrj init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
95*38fd1498Szrj {
96*38fd1498Szrj   DEP_PRO (dep) = pro;
97*38fd1498Szrj   DEP_CON (dep) = con;
98*38fd1498Szrj   DEP_TYPE (dep) = type;
99*38fd1498Szrj   DEP_STATUS (dep) = ds;
100*38fd1498Szrj   DEP_COST (dep) = UNKNOWN_DEP_COST;
101*38fd1498Szrj   DEP_NONREG (dep) = 0;
102*38fd1498Szrj   DEP_MULTIPLE (dep) = 0;
103*38fd1498Szrj   DEP_REPLACE (dep) = NULL;
104*38fd1498Szrj }
105*38fd1498Szrj 
106*38fd1498Szrj /* Init DEP with the arguments.
107*38fd1498Szrj    While most of the scheduler (including targets) only need the major type
108*38fd1498Szrj    of the dependency, it is convenient to hide full dep_status from them.  */
109*38fd1498Szrj void
init_dep(dep_t dep,rtx_insn * pro,rtx_insn * con,enum reg_note kind)110*38fd1498Szrj init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
111*38fd1498Szrj {
112*38fd1498Szrj   ds_t ds;
113*38fd1498Szrj 
114*38fd1498Szrj   if ((current_sched_info->flags & USE_DEPS_LIST))
115*38fd1498Szrj     ds = dk_to_ds (kind);
116*38fd1498Szrj   else
117*38fd1498Szrj     ds = 0;
118*38fd1498Szrj 
119*38fd1498Szrj   init_dep_1 (dep, pro, con, kind, ds);
120*38fd1498Szrj }
121*38fd1498Szrj 
122*38fd1498Szrj /* Make a copy of FROM in TO.  */
123*38fd1498Szrj static void
copy_dep(dep_t to,dep_t from)124*38fd1498Szrj copy_dep (dep_t to, dep_t from)
125*38fd1498Szrj {
126*38fd1498Szrj   memcpy (to, from, sizeof (*to));
127*38fd1498Szrj }
128*38fd1498Szrj 
129*38fd1498Szrj static void dump_ds (FILE *, ds_t);
130*38fd1498Szrj 
131*38fd1498Szrj /* Define flags for dump_dep ().  */
132*38fd1498Szrj 
133*38fd1498Szrj /* Dump producer of the dependence.  */
134*38fd1498Szrj #define DUMP_DEP_PRO (2)
135*38fd1498Szrj 
136*38fd1498Szrj /* Dump consumer of the dependence.  */
137*38fd1498Szrj #define DUMP_DEP_CON (4)
138*38fd1498Szrj 
139*38fd1498Szrj /* Dump type of the dependence.  */
140*38fd1498Szrj #define DUMP_DEP_TYPE (8)
141*38fd1498Szrj 
142*38fd1498Szrj /* Dump status of the dependence.  */
143*38fd1498Szrj #define DUMP_DEP_STATUS (16)
144*38fd1498Szrj 
145*38fd1498Szrj /* Dump all information about the dependence.  */
146*38fd1498Szrj #define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE	\
147*38fd1498Szrj 		      |DUMP_DEP_STATUS)
148*38fd1498Szrj 
149*38fd1498Szrj /* Dump DEP to DUMP.
150*38fd1498Szrj    FLAGS is a bit mask specifying what information about DEP needs
151*38fd1498Szrj    to be printed.
152*38fd1498Szrj    If FLAGS has the very first bit set, then dump all information about DEP
153*38fd1498Szrj    and propagate this bit into the callee dump functions.  */
154*38fd1498Szrj static void
dump_dep(FILE * dump,dep_t dep,int flags)155*38fd1498Szrj dump_dep (FILE *dump, dep_t dep, int flags)
156*38fd1498Szrj {
157*38fd1498Szrj   if (flags & 1)
158*38fd1498Szrj     flags |= DUMP_DEP_ALL;
159*38fd1498Szrj 
160*38fd1498Szrj   fprintf (dump, "<");
161*38fd1498Szrj 
162*38fd1498Szrj   if (flags & DUMP_DEP_PRO)
163*38fd1498Szrj     fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
164*38fd1498Szrj 
165*38fd1498Szrj   if (flags & DUMP_DEP_CON)
166*38fd1498Szrj     fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
167*38fd1498Szrj 
168*38fd1498Szrj   if (flags & DUMP_DEP_TYPE)
169*38fd1498Szrj     {
170*38fd1498Szrj       char t;
171*38fd1498Szrj       enum reg_note type = DEP_TYPE (dep);
172*38fd1498Szrj 
173*38fd1498Szrj       switch (type)
174*38fd1498Szrj 	{
175*38fd1498Szrj 	case REG_DEP_TRUE:
176*38fd1498Szrj 	  t = 't';
177*38fd1498Szrj 	  break;
178*38fd1498Szrj 
179*38fd1498Szrj 	case REG_DEP_OUTPUT:
180*38fd1498Szrj 	  t = 'o';
181*38fd1498Szrj 	  break;
182*38fd1498Szrj 
183*38fd1498Szrj 	case REG_DEP_CONTROL:
184*38fd1498Szrj 	  t = 'c';
185*38fd1498Szrj 	  break;
186*38fd1498Szrj 
187*38fd1498Szrj 	case REG_DEP_ANTI:
188*38fd1498Szrj 	  t = 'a';
189*38fd1498Szrj 	  break;
190*38fd1498Szrj 
191*38fd1498Szrj 	default:
192*38fd1498Szrj 	  gcc_unreachable ();
193*38fd1498Szrj 	  break;
194*38fd1498Szrj 	}
195*38fd1498Szrj 
196*38fd1498Szrj       fprintf (dump, "%c; ", t);
197*38fd1498Szrj     }
198*38fd1498Szrj 
199*38fd1498Szrj   if (flags & DUMP_DEP_STATUS)
200*38fd1498Szrj     {
201*38fd1498Szrj       if (current_sched_info->flags & USE_DEPS_LIST)
202*38fd1498Szrj 	dump_ds (dump, DEP_STATUS (dep));
203*38fd1498Szrj     }
204*38fd1498Szrj 
205*38fd1498Szrj   fprintf (dump, ">");
206*38fd1498Szrj }
207*38fd1498Szrj 
208*38fd1498Szrj /* Default flags for dump_dep ().  */
209*38fd1498Szrj static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
210*38fd1498Szrj 
211*38fd1498Szrj /* Dump all fields of DEP to STDERR.  */
212*38fd1498Szrj void
sd_debug_dep(dep_t dep)213*38fd1498Szrj sd_debug_dep (dep_t dep)
214*38fd1498Szrj {
215*38fd1498Szrj   dump_dep (stderr, dep, 1);
216*38fd1498Szrj   fprintf (stderr, "\n");
217*38fd1498Szrj }
218*38fd1498Szrj 
219*38fd1498Szrj /* Determine whether DEP is a dependency link of a non-debug insn on a
220*38fd1498Szrj    debug insn.  */
221*38fd1498Szrj 
222*38fd1498Szrj static inline bool
depl_on_debug_p(dep_link_t dep)223*38fd1498Szrj depl_on_debug_p (dep_link_t dep)
224*38fd1498Szrj {
225*38fd1498Szrj   return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
226*38fd1498Szrj 	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
227*38fd1498Szrj }
228*38fd1498Szrj 
229*38fd1498Szrj /* Functions to operate with a single link from the dependencies lists -
230*38fd1498Szrj    dep_link_t.  */
231*38fd1498Szrj 
232*38fd1498Szrj /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
233*38fd1498Szrj    PREV_NEXT_P.  */
234*38fd1498Szrj static void
attach_dep_link(dep_link_t l,dep_link_t * prev_nextp)235*38fd1498Szrj attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
236*38fd1498Szrj {
237*38fd1498Szrj   dep_link_t next = *prev_nextp;
238*38fd1498Szrj 
239*38fd1498Szrj   gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
240*38fd1498Szrj 	      && DEP_LINK_NEXT (l) == NULL);
241*38fd1498Szrj 
242*38fd1498Szrj   /* Init node being inserted.  */
243*38fd1498Szrj   DEP_LINK_PREV_NEXTP (l) = prev_nextp;
244*38fd1498Szrj   DEP_LINK_NEXT (l) = next;
245*38fd1498Szrj 
246*38fd1498Szrj   /* Fix next node.  */
247*38fd1498Szrj   if (next != NULL)
248*38fd1498Szrj     {
249*38fd1498Szrj       gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
250*38fd1498Szrj 
251*38fd1498Szrj       DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
252*38fd1498Szrj     }
253*38fd1498Szrj 
254*38fd1498Szrj   /* Fix prev node.  */
255*38fd1498Szrj   *prev_nextp = l;
256*38fd1498Szrj }
257*38fd1498Szrj 
258*38fd1498Szrj /* Add dep_link LINK to deps_list L.  */
259*38fd1498Szrj static void
add_to_deps_list(dep_link_t link,deps_list_t l)260*38fd1498Szrj add_to_deps_list (dep_link_t link, deps_list_t l)
261*38fd1498Szrj {
262*38fd1498Szrj   attach_dep_link (link, &DEPS_LIST_FIRST (l));
263*38fd1498Szrj 
264*38fd1498Szrj   /* Don't count debug deps.  */
265*38fd1498Szrj   if (!depl_on_debug_p (link))
266*38fd1498Szrj     ++DEPS_LIST_N_LINKS (l);
267*38fd1498Szrj }
268*38fd1498Szrj 
269*38fd1498Szrj /* Detach dep_link L from the list.  */
270*38fd1498Szrj static void
detach_dep_link(dep_link_t l)271*38fd1498Szrj detach_dep_link (dep_link_t l)
272*38fd1498Szrj {
273*38fd1498Szrj   dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
274*38fd1498Szrj   dep_link_t next = DEP_LINK_NEXT (l);
275*38fd1498Szrj 
276*38fd1498Szrj   *prev_nextp = next;
277*38fd1498Szrj 
278*38fd1498Szrj   if (next != NULL)
279*38fd1498Szrj     DEP_LINK_PREV_NEXTP (next) = prev_nextp;
280*38fd1498Szrj 
281*38fd1498Szrj   DEP_LINK_PREV_NEXTP (l) = NULL;
282*38fd1498Szrj   DEP_LINK_NEXT (l) = NULL;
283*38fd1498Szrj }
284*38fd1498Szrj 
285*38fd1498Szrj /* Remove link LINK from list LIST.  */
286*38fd1498Szrj static void
remove_from_deps_list(dep_link_t link,deps_list_t list)287*38fd1498Szrj remove_from_deps_list (dep_link_t link, deps_list_t list)
288*38fd1498Szrj {
289*38fd1498Szrj   detach_dep_link (link);
290*38fd1498Szrj 
291*38fd1498Szrj   /* Don't count debug deps.  */
292*38fd1498Szrj   if (!depl_on_debug_p (link))
293*38fd1498Szrj     --DEPS_LIST_N_LINKS (list);
294*38fd1498Szrj }
295*38fd1498Szrj 
296*38fd1498Szrj /* Move link LINK from list FROM to list TO.  */
297*38fd1498Szrj static void
move_dep_link(dep_link_t link,deps_list_t from,deps_list_t to)298*38fd1498Szrj move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
299*38fd1498Szrj {
300*38fd1498Szrj   remove_from_deps_list (link, from);
301*38fd1498Szrj   add_to_deps_list (link, to);
302*38fd1498Szrj }
303*38fd1498Szrj 
304*38fd1498Szrj /* Return true of LINK is not attached to any list.  */
305*38fd1498Szrj static bool
dep_link_is_detached_p(dep_link_t link)306*38fd1498Szrj dep_link_is_detached_p (dep_link_t link)
307*38fd1498Szrj {
308*38fd1498Szrj   return DEP_LINK_PREV_NEXTP (link) == NULL;
309*38fd1498Szrj }
310*38fd1498Szrj 
311*38fd1498Szrj /* Pool to hold all dependency nodes (dep_node_t).  */
312*38fd1498Szrj static object_allocator<_dep_node> *dn_pool;
313*38fd1498Szrj 
314*38fd1498Szrj /* Number of dep_nodes out there.  */
315*38fd1498Szrj static int dn_pool_diff = 0;
316*38fd1498Szrj 
317*38fd1498Szrj /* Create a dep_node.  */
318*38fd1498Szrj static dep_node_t
create_dep_node(void)319*38fd1498Szrj create_dep_node (void)
320*38fd1498Szrj {
321*38fd1498Szrj   dep_node_t n = dn_pool->allocate ();
322*38fd1498Szrj   dep_link_t back = DEP_NODE_BACK (n);
323*38fd1498Szrj   dep_link_t forw = DEP_NODE_FORW (n);
324*38fd1498Szrj 
325*38fd1498Szrj   DEP_LINK_NODE (back) = n;
326*38fd1498Szrj   DEP_LINK_NEXT (back) = NULL;
327*38fd1498Szrj   DEP_LINK_PREV_NEXTP (back) = NULL;
328*38fd1498Szrj 
329*38fd1498Szrj   DEP_LINK_NODE (forw) = n;
330*38fd1498Szrj   DEP_LINK_NEXT (forw) = NULL;
331*38fd1498Szrj   DEP_LINK_PREV_NEXTP (forw) = NULL;
332*38fd1498Szrj 
333*38fd1498Szrj   ++dn_pool_diff;
334*38fd1498Szrj 
335*38fd1498Szrj   return n;
336*38fd1498Szrj }
337*38fd1498Szrj 
338*38fd1498Szrj /* Delete dep_node N.  N must not be connected to any deps_list.  */
339*38fd1498Szrj static void
delete_dep_node(dep_node_t n)340*38fd1498Szrj delete_dep_node (dep_node_t n)
341*38fd1498Szrj {
342*38fd1498Szrj   gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
343*38fd1498Szrj 	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));
344*38fd1498Szrj 
345*38fd1498Szrj   XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));
346*38fd1498Szrj 
347*38fd1498Szrj   --dn_pool_diff;
348*38fd1498Szrj 
349*38fd1498Szrj   dn_pool->remove (n);
350*38fd1498Szrj }
351*38fd1498Szrj 
352*38fd1498Szrj /* Pool to hold dependencies lists (deps_list_t).  */
353*38fd1498Szrj static object_allocator<_deps_list> *dl_pool;
354*38fd1498Szrj 
355*38fd1498Szrj /* Number of deps_lists out there.  */
356*38fd1498Szrj static int dl_pool_diff = 0;
357*38fd1498Szrj 
358*38fd1498Szrj /* Functions to operate with dependences lists - deps_list_t.  */
359*38fd1498Szrj 
360*38fd1498Szrj /* Return true if list L is empty.  */
361*38fd1498Szrj static bool
deps_list_empty_p(deps_list_t l)362*38fd1498Szrj deps_list_empty_p (deps_list_t l)
363*38fd1498Szrj {
364*38fd1498Szrj   return DEPS_LIST_N_LINKS (l) == 0;
365*38fd1498Szrj }
366*38fd1498Szrj 
367*38fd1498Szrj /* Create a new deps_list.  */
368*38fd1498Szrj static deps_list_t
create_deps_list(void)369*38fd1498Szrj create_deps_list (void)
370*38fd1498Szrj {
371*38fd1498Szrj   deps_list_t l = dl_pool->allocate ();
372*38fd1498Szrj 
373*38fd1498Szrj   DEPS_LIST_FIRST (l) = NULL;
374*38fd1498Szrj   DEPS_LIST_N_LINKS (l) = 0;
375*38fd1498Szrj 
376*38fd1498Szrj   ++dl_pool_diff;
377*38fd1498Szrj   return l;
378*38fd1498Szrj }
379*38fd1498Szrj 
380*38fd1498Szrj /* Free deps_list L.  */
381*38fd1498Szrj static void
free_deps_list(deps_list_t l)382*38fd1498Szrj free_deps_list (deps_list_t l)
383*38fd1498Szrj {
384*38fd1498Szrj   gcc_assert (deps_list_empty_p (l));
385*38fd1498Szrj 
386*38fd1498Szrj   --dl_pool_diff;
387*38fd1498Szrj 
388*38fd1498Szrj   dl_pool->remove (l);
389*38fd1498Szrj }
390*38fd1498Szrj 
391*38fd1498Szrj /* Return true if there is no dep_nodes and deps_lists out there.
392*38fd1498Szrj    After the region is scheduled all the dependency nodes and lists
393*38fd1498Szrj    should [generally] be returned to pool.  */
394*38fd1498Szrj bool
deps_pools_are_empty_p(void)395*38fd1498Szrj deps_pools_are_empty_p (void)
396*38fd1498Szrj {
397*38fd1498Szrj   return dn_pool_diff == 0 && dl_pool_diff == 0;
398*38fd1498Szrj }
399*38fd1498Szrj 
400*38fd1498Szrj /* Remove all elements from L.  */
401*38fd1498Szrj static void
clear_deps_list(deps_list_t l)402*38fd1498Szrj clear_deps_list (deps_list_t l)
403*38fd1498Szrj {
404*38fd1498Szrj   do
405*38fd1498Szrj     {
406*38fd1498Szrj       dep_link_t link = DEPS_LIST_FIRST (l);
407*38fd1498Szrj 
408*38fd1498Szrj       if (link == NULL)
409*38fd1498Szrj 	break;
410*38fd1498Szrj 
411*38fd1498Szrj       remove_from_deps_list (link, l);
412*38fd1498Szrj     }
413*38fd1498Szrj   while (1);
414*38fd1498Szrj }
415*38fd1498Szrj 
416*38fd1498Szrj /* Decide whether a dependency should be treated as a hard or a speculative
417*38fd1498Szrj    dependency.  */
418*38fd1498Szrj static bool
dep_spec_p(dep_t dep)419*38fd1498Szrj dep_spec_p (dep_t dep)
420*38fd1498Szrj {
421*38fd1498Szrj   if (current_sched_info->flags & DO_SPECULATION)
422*38fd1498Szrj     {
423*38fd1498Szrj       if (DEP_STATUS (dep) & SPECULATIVE)
424*38fd1498Szrj 	return true;
425*38fd1498Szrj     }
426*38fd1498Szrj   if (current_sched_info->flags & DO_PREDICATION)
427*38fd1498Szrj     {
428*38fd1498Szrj       if (DEP_TYPE (dep) == REG_DEP_CONTROL)
429*38fd1498Szrj 	return true;
430*38fd1498Szrj     }
431*38fd1498Szrj   if (DEP_REPLACE (dep) != NULL)
432*38fd1498Szrj     return true;
433*38fd1498Szrj   return false;
434*38fd1498Szrj }
435*38fd1498Szrj 
436*38fd1498Szrj static regset reg_pending_sets;
437*38fd1498Szrj static regset reg_pending_clobbers;
438*38fd1498Szrj static regset reg_pending_uses;
439*38fd1498Szrj static regset reg_pending_control_uses;
440*38fd1498Szrj static enum reg_pending_barrier_mode reg_pending_barrier;
441*38fd1498Szrj 
442*38fd1498Szrj /* Hard registers implicitly clobbered or used (or may be implicitly
443*38fd1498Szrj    clobbered or used) by the currently analyzed insn.  For example,
444*38fd1498Szrj    insn in its constraint has one register class.  Even if there is
445*38fd1498Szrj    currently no hard register in the insn, the particular hard
446*38fd1498Szrj    register will be in the insn after reload pass because the
447*38fd1498Szrj    constraint requires it.  */
448*38fd1498Szrj static HARD_REG_SET implicit_reg_pending_clobbers;
449*38fd1498Szrj static HARD_REG_SET implicit_reg_pending_uses;
450*38fd1498Szrj 
451*38fd1498Szrj /* To speed up the test for duplicate dependency links we keep a
452*38fd1498Szrj    record of dependencies created by add_dependence when the average
453*38fd1498Szrj    number of instructions in a basic block is very large.
454*38fd1498Szrj 
455*38fd1498Szrj    Studies have shown that there is typically around 5 instructions between
456*38fd1498Szrj    branches for typical C code.  So we can make a guess that the average
457*38fd1498Szrj    basic block is approximately 5 instructions long; we will choose 100X
458*38fd1498Szrj    the average size as a very large basic block.
459*38fd1498Szrj 
460*38fd1498Szrj    Each insn has associated bitmaps for its dependencies.  Each bitmap
461*38fd1498Szrj    has enough entries to represent a dependency on any other insn in
462*38fd1498Szrj    the insn chain.  All bitmap for true dependencies cache is
463*38fd1498Szrj    allocated then the rest two ones are also allocated.  */
464*38fd1498Szrj static bitmap_head *true_dependency_cache = NULL;
465*38fd1498Szrj static bitmap_head *output_dependency_cache = NULL;
466*38fd1498Szrj static bitmap_head *anti_dependency_cache = NULL;
467*38fd1498Szrj static bitmap_head *control_dependency_cache = NULL;
468*38fd1498Szrj static bitmap_head *spec_dependency_cache = NULL;
469*38fd1498Szrj static int cache_size;
470*38fd1498Szrj 
471*38fd1498Szrj /* True if we should mark added dependencies as a non-register deps.  */
472*38fd1498Szrj static bool mark_as_hard;
473*38fd1498Szrj 
474*38fd1498Szrj static int deps_may_trap_p (const_rtx);
475*38fd1498Szrj static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
476*38fd1498Szrj static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
477*38fd1498Szrj 				 enum reg_note, bool);
478*38fd1498Szrj static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
479*38fd1498Szrj 					  rtx_insn_list **, int, enum reg_note,
480*38fd1498Szrj 					  bool);
481*38fd1498Szrj static void delete_all_dependences (rtx_insn *);
482*38fd1498Szrj static void chain_to_prev_insn (rtx_insn *);
483*38fd1498Szrj 
484*38fd1498Szrj static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
485*38fd1498Szrj static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
486*38fd1498Szrj static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
487*38fd1498Szrj static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);
488*38fd1498Szrj 
489*38fd1498Szrj static bool sched_has_condition_p (const rtx_insn *);
490*38fd1498Szrj static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
491*38fd1498Szrj 
492*38fd1498Szrj static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
493*38fd1498Szrj 							  rtx, rtx);
494*38fd1498Szrj static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
495*38fd1498Szrj 
496*38fd1498Szrj static void check_dep (dep_t, bool);
497*38fd1498Szrj 
498*38fd1498Szrj 
499*38fd1498Szrj /* Return nonzero if a load of the memory reference MEM can cause a trap.  */
500*38fd1498Szrj 
501*38fd1498Szrj static int
deps_may_trap_p(const_rtx mem)502*38fd1498Szrj deps_may_trap_p (const_rtx mem)
503*38fd1498Szrj {
504*38fd1498Szrj   const_rtx addr = XEXP (mem, 0);
505*38fd1498Szrj 
506*38fd1498Szrj   if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
507*38fd1498Szrj     {
508*38fd1498Szrj       const_rtx t = get_reg_known_value (REGNO (addr));
509*38fd1498Szrj       if (t)
510*38fd1498Szrj 	addr = t;
511*38fd1498Szrj     }
512*38fd1498Szrj   return rtx_addr_can_trap_p (addr);
513*38fd1498Szrj }
514*38fd1498Szrj 
515*38fd1498Szrj 
516*38fd1498Szrj /* Find the condition under which INSN is executed.  If REV is not NULL,
517*38fd1498Szrj    it is set to TRUE when the returned comparison should be reversed
518*38fd1498Szrj    to get the actual condition.  */
519*38fd1498Szrj static rtx
sched_get_condition_with_rev_uncached(const rtx_insn * insn,bool * rev)520*38fd1498Szrj sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
521*38fd1498Szrj {
522*38fd1498Szrj   rtx pat = PATTERN (insn);
523*38fd1498Szrj   rtx src;
524*38fd1498Szrj 
525*38fd1498Szrj   if (rev)
526*38fd1498Szrj     *rev = false;
527*38fd1498Szrj 
528*38fd1498Szrj   if (GET_CODE (pat) == COND_EXEC)
529*38fd1498Szrj     return COND_EXEC_TEST (pat);
530*38fd1498Szrj 
531*38fd1498Szrj   if (!any_condjump_p (insn) || !onlyjump_p (insn))
532*38fd1498Szrj     return 0;
533*38fd1498Szrj 
534*38fd1498Szrj   src = SET_SRC (pc_set (insn));
535*38fd1498Szrj 
536*38fd1498Szrj   if (XEXP (src, 2) == pc_rtx)
537*38fd1498Szrj     return XEXP (src, 0);
538*38fd1498Szrj   else if (XEXP (src, 1) == pc_rtx)
539*38fd1498Szrj     {
540*38fd1498Szrj       rtx cond = XEXP (src, 0);
541*38fd1498Szrj       enum rtx_code revcode = reversed_comparison_code (cond, insn);
542*38fd1498Szrj 
543*38fd1498Szrj       if (revcode == UNKNOWN)
544*38fd1498Szrj 	return 0;
545*38fd1498Szrj 
546*38fd1498Szrj       if (rev)
547*38fd1498Szrj 	*rev = true;
548*38fd1498Szrj       return cond;
549*38fd1498Szrj     }
550*38fd1498Szrj 
551*38fd1498Szrj   return 0;
552*38fd1498Szrj }
553*38fd1498Szrj 
554*38fd1498Szrj /* Return the condition under which INSN does not execute (i.e.  the
555*38fd1498Szrj    not-taken condition for a conditional branch), or NULL if we cannot
556*38fd1498Szrj    find such a condition.  The caller should make a copy of the condition
557*38fd1498Szrj    before using it.  */
558*38fd1498Szrj rtx
sched_get_reverse_condition_uncached(const rtx_insn * insn)559*38fd1498Szrj sched_get_reverse_condition_uncached (const rtx_insn *insn)
560*38fd1498Szrj {
561*38fd1498Szrj   bool rev;
562*38fd1498Szrj   rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
563*38fd1498Szrj   if (cond == NULL_RTX)
564*38fd1498Szrj     return cond;
565*38fd1498Szrj   if (!rev)
566*38fd1498Szrj     {
567*38fd1498Szrj       enum rtx_code revcode = reversed_comparison_code (cond, insn);
568*38fd1498Szrj       cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
569*38fd1498Szrj 			     XEXP (cond, 0),
570*38fd1498Szrj 			     XEXP (cond, 1));
571*38fd1498Szrj     }
572*38fd1498Szrj   return cond;
573*38fd1498Szrj }
574*38fd1498Szrj 
575*38fd1498Szrj /* Caching variant of sched_get_condition_with_rev_uncached.
576*38fd1498Szrj    We only do actual work the first time we come here for an insn; the
577*38fd1498Szrj    results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
578*38fd1498Szrj static rtx
sched_get_condition_with_rev(const rtx_insn * insn,bool * rev)579*38fd1498Szrj sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
580*38fd1498Szrj {
581*38fd1498Szrj   bool tmp;
582*38fd1498Szrj 
583*38fd1498Szrj   if (INSN_LUID (insn) == 0)
584*38fd1498Szrj     return sched_get_condition_with_rev_uncached (insn, rev);
585*38fd1498Szrj 
586*38fd1498Szrj   if (INSN_CACHED_COND (insn) == const_true_rtx)
587*38fd1498Szrj     return NULL_RTX;
588*38fd1498Szrj 
589*38fd1498Szrj   if (INSN_CACHED_COND (insn) != NULL_RTX)
590*38fd1498Szrj     {
591*38fd1498Szrj       if (rev)
592*38fd1498Szrj 	*rev = INSN_REVERSE_COND (insn);
593*38fd1498Szrj       return INSN_CACHED_COND (insn);
594*38fd1498Szrj     }
595*38fd1498Szrj 
596*38fd1498Szrj   INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
597*38fd1498Szrj   INSN_REVERSE_COND (insn) = tmp;
598*38fd1498Szrj 
599*38fd1498Szrj   if (INSN_CACHED_COND (insn) == NULL_RTX)
600*38fd1498Szrj     {
601*38fd1498Szrj       INSN_CACHED_COND (insn) = const_true_rtx;
602*38fd1498Szrj       return NULL_RTX;
603*38fd1498Szrj     }
604*38fd1498Szrj 
605*38fd1498Szrj   if (rev)
606*38fd1498Szrj     *rev = INSN_REVERSE_COND (insn);
607*38fd1498Szrj   return INSN_CACHED_COND (insn);
608*38fd1498Szrj }
609*38fd1498Szrj 
610*38fd1498Szrj /* True when we can find a condition under which INSN is executed.  */
611*38fd1498Szrj static bool
sched_has_condition_p(const rtx_insn * insn)612*38fd1498Szrj sched_has_condition_p (const rtx_insn *insn)
613*38fd1498Szrj {
614*38fd1498Szrj   return !! sched_get_condition_with_rev (insn, NULL);
615*38fd1498Szrj }
616*38fd1498Szrj 
617*38fd1498Szrj 
618*38fd1498Szrj 
619*38fd1498Szrj /* Return nonzero if conditions COND1 and COND2 can never be both true.  */
620*38fd1498Szrj static int
conditions_mutex_p(const_rtx cond1,const_rtx cond2,bool rev1,bool rev2)621*38fd1498Szrj conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
622*38fd1498Szrj {
623*38fd1498Szrj   if (COMPARISON_P (cond1)
624*38fd1498Szrj       && COMPARISON_P (cond2)
625*38fd1498Szrj       && GET_CODE (cond1) ==
626*38fd1498Szrj 	  (rev1==rev2
627*38fd1498Szrj 	  ? reversed_comparison_code (cond2, NULL)
628*38fd1498Szrj 	  : GET_CODE (cond2))
629*38fd1498Szrj       && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
630*38fd1498Szrj       && XEXP (cond1, 1) == XEXP (cond2, 1))
631*38fd1498Szrj     return 1;
632*38fd1498Szrj   return 0;
633*38fd1498Szrj }
634*38fd1498Szrj 
635*38fd1498Szrj /* Return true if insn1 and insn2 can never depend on one another because
636*38fd1498Szrj    the conditions under which they are executed are mutually exclusive.  */
637*38fd1498Szrj bool
sched_insns_conditions_mutex_p(const rtx_insn * insn1,const rtx_insn * insn2)638*38fd1498Szrj sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
639*38fd1498Szrj {
640*38fd1498Szrj   rtx cond1, cond2;
641*38fd1498Szrj   bool rev1 = false, rev2 = false;
642*38fd1498Szrj 
643*38fd1498Szrj   /* df doesn't handle conditional lifetimes entirely correctly;
644*38fd1498Szrj      calls mess up the conditional lifetimes.  */
645*38fd1498Szrj   if (!CALL_P (insn1) && !CALL_P (insn2))
646*38fd1498Szrj     {
647*38fd1498Szrj       cond1 = sched_get_condition_with_rev (insn1, &rev1);
648*38fd1498Szrj       cond2 = sched_get_condition_with_rev (insn2, &rev2);
649*38fd1498Szrj       if (cond1 && cond2
650*38fd1498Szrj 	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
651*38fd1498Szrj 	  /* Make sure first instruction doesn't affect condition of second
652*38fd1498Szrj 	     instruction if switched.  */
653*38fd1498Szrj 	  && !modified_in_p (cond1, insn2)
654*38fd1498Szrj 	  /* Make sure second instruction doesn't affect condition of first
655*38fd1498Szrj 	     instruction if switched.  */
656*38fd1498Szrj 	  && !modified_in_p (cond2, insn1))
657*38fd1498Szrj 	return true;
658*38fd1498Szrj     }
659*38fd1498Szrj   return false;
660*38fd1498Szrj }
661*38fd1498Szrj 
662*38fd1498Szrj 
663*38fd1498Szrj /* Return true if INSN can potentially be speculated with type DS.  */
664*38fd1498Szrj bool
sched_insn_is_legitimate_for_speculation_p(const rtx_insn * insn,ds_t ds)665*38fd1498Szrj sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
666*38fd1498Szrj {
667*38fd1498Szrj   if (HAS_INTERNAL_DEP (insn))
668*38fd1498Szrj     return false;
669*38fd1498Szrj 
670*38fd1498Szrj   if (!NONJUMP_INSN_P (insn))
671*38fd1498Szrj     return false;
672*38fd1498Szrj 
673*38fd1498Szrj   if (SCHED_GROUP_P (insn))
674*38fd1498Szrj     return false;
675*38fd1498Szrj 
676*38fd1498Szrj   if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn)))
677*38fd1498Szrj     return false;
678*38fd1498Szrj 
679*38fd1498Szrj   if (side_effects_p (PATTERN (insn)))
680*38fd1498Szrj     return false;
681*38fd1498Szrj 
682*38fd1498Szrj   if (ds & BE_IN_SPEC)
683*38fd1498Szrj     /* The following instructions, which depend on a speculatively scheduled
684*38fd1498Szrj        instruction, cannot be speculatively scheduled along.  */
685*38fd1498Szrj     {
686*38fd1498Szrj       if (may_trap_or_fault_p (PATTERN (insn)))
687*38fd1498Szrj 	/* If instruction might fault, it cannot be speculatively scheduled.
688*38fd1498Szrj 	   For control speculation it's obvious why and for data speculation
689*38fd1498Szrj 	   it's because the insn might get wrong input if speculation
690*38fd1498Szrj 	   wasn't successful.  */
691*38fd1498Szrj 	return false;
692*38fd1498Szrj 
693*38fd1498Szrj       if ((ds & BE_IN_DATA)
694*38fd1498Szrj 	  && sched_has_condition_p (insn))
695*38fd1498Szrj 	/* If this is a predicated instruction, then it cannot be
696*38fd1498Szrj 	   speculatively scheduled.  See PR35659.  */
697*38fd1498Szrj 	return false;
698*38fd1498Szrj     }
699*38fd1498Szrj 
700*38fd1498Szrj   return true;
701*38fd1498Szrj }
702*38fd1498Szrj 
703*38fd1498Szrj /* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
704*38fd1498Szrj    initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
705*38fd1498Szrj    and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
706*38fd1498Szrj    This function is used to switch sd_iterator to the next list.
707*38fd1498Szrj    !!! For internal use only.  Might consider moving it to sched-int.h.  */
708*38fd1498Szrj void
sd_next_list(const_rtx insn,sd_list_types_def * types_ptr,deps_list_t * list_ptr,bool * resolved_p_ptr)709*38fd1498Szrj sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
710*38fd1498Szrj 	      deps_list_t *list_ptr, bool *resolved_p_ptr)
711*38fd1498Szrj {
712*38fd1498Szrj   sd_list_types_def types = *types_ptr;
713*38fd1498Szrj 
714*38fd1498Szrj   if (types & SD_LIST_HARD_BACK)
715*38fd1498Szrj     {
716*38fd1498Szrj       *list_ptr = INSN_HARD_BACK_DEPS (insn);
717*38fd1498Szrj       *resolved_p_ptr = false;
718*38fd1498Szrj       *types_ptr = types & ~SD_LIST_HARD_BACK;
719*38fd1498Szrj     }
720*38fd1498Szrj   else if (types & SD_LIST_SPEC_BACK)
721*38fd1498Szrj     {
722*38fd1498Szrj       *list_ptr = INSN_SPEC_BACK_DEPS (insn);
723*38fd1498Szrj       *resolved_p_ptr = false;
724*38fd1498Szrj       *types_ptr = types & ~SD_LIST_SPEC_BACK;
725*38fd1498Szrj     }
726*38fd1498Szrj   else if (types & SD_LIST_FORW)
727*38fd1498Szrj     {
728*38fd1498Szrj       *list_ptr = INSN_FORW_DEPS (insn);
729*38fd1498Szrj       *resolved_p_ptr = false;
730*38fd1498Szrj       *types_ptr = types & ~SD_LIST_FORW;
731*38fd1498Szrj     }
732*38fd1498Szrj   else if (types & SD_LIST_RES_BACK)
733*38fd1498Szrj     {
734*38fd1498Szrj       *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
735*38fd1498Szrj       *resolved_p_ptr = true;
736*38fd1498Szrj       *types_ptr = types & ~SD_LIST_RES_BACK;
737*38fd1498Szrj     }
738*38fd1498Szrj   else if (types & SD_LIST_RES_FORW)
739*38fd1498Szrj     {
740*38fd1498Szrj       *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
741*38fd1498Szrj       *resolved_p_ptr = true;
742*38fd1498Szrj       *types_ptr = types & ~SD_LIST_RES_FORW;
743*38fd1498Szrj     }
744*38fd1498Szrj   else
745*38fd1498Szrj     {
746*38fd1498Szrj       *list_ptr = NULL;
747*38fd1498Szrj       *resolved_p_ptr = false;
748*38fd1498Szrj       *types_ptr = SD_LIST_NONE;
749*38fd1498Szrj     }
750*38fd1498Szrj }
751*38fd1498Szrj 
752*38fd1498Szrj /* Return the summary size of INSN's lists defined by LIST_TYPES.  */
753*38fd1498Szrj int
sd_lists_size(const_rtx insn,sd_list_types_def list_types)754*38fd1498Szrj sd_lists_size (const_rtx insn, sd_list_types_def list_types)
755*38fd1498Szrj {
756*38fd1498Szrj   int size = 0;
757*38fd1498Szrj 
758*38fd1498Szrj   while (list_types != SD_LIST_NONE)
759*38fd1498Szrj     {
760*38fd1498Szrj       deps_list_t list;
761*38fd1498Szrj       bool resolved_p;
762*38fd1498Szrj 
763*38fd1498Szrj       sd_next_list (insn, &list_types, &list, &resolved_p);
764*38fd1498Szrj       if (list)
765*38fd1498Szrj 	size += DEPS_LIST_N_LINKS (list);
766*38fd1498Szrj     }
767*38fd1498Szrj 
768*38fd1498Szrj   return size;
769*38fd1498Szrj }
770*38fd1498Szrj 
771*38fd1498Szrj /* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
772*38fd1498Szrj 
773*38fd1498Szrj bool
sd_lists_empty_p(const_rtx insn,sd_list_types_def list_types)774*38fd1498Szrj sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
775*38fd1498Szrj {
776*38fd1498Szrj   while (list_types != SD_LIST_NONE)
777*38fd1498Szrj     {
778*38fd1498Szrj       deps_list_t list;
779*38fd1498Szrj       bool resolved_p;
780*38fd1498Szrj 
781*38fd1498Szrj       sd_next_list (insn, &list_types, &list, &resolved_p);
782*38fd1498Szrj       if (!deps_list_empty_p (list))
783*38fd1498Szrj 	return false;
784*38fd1498Szrj     }
785*38fd1498Szrj 
786*38fd1498Szrj   return true;
787*38fd1498Szrj }
788*38fd1498Szrj 
789*38fd1498Szrj /* Initialize data for INSN.  */
790*38fd1498Szrj void
sd_init_insn(rtx_insn * insn)791*38fd1498Szrj sd_init_insn (rtx_insn *insn)
792*38fd1498Szrj {
793*38fd1498Szrj   INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
794*38fd1498Szrj   INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
795*38fd1498Szrj   INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
796*38fd1498Szrj   INSN_FORW_DEPS (insn) = create_deps_list ();
797*38fd1498Szrj   INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
798*38fd1498Szrj 
799*38fd1498Szrj   /* ??? It would be nice to allocate dependency caches here.  */
800*38fd1498Szrj }
801*38fd1498Szrj 
802*38fd1498Szrj /* Free data for INSN.  */
803*38fd1498Szrj void
sd_finish_insn(rtx_insn * insn)804*38fd1498Szrj sd_finish_insn (rtx_insn *insn)
805*38fd1498Szrj {
806*38fd1498Szrj   /* ??? It would be nice to deallocate dependency caches here.  */
807*38fd1498Szrj 
808*38fd1498Szrj   free_deps_list (INSN_HARD_BACK_DEPS (insn));
809*38fd1498Szrj   INSN_HARD_BACK_DEPS (insn) = NULL;
810*38fd1498Szrj 
811*38fd1498Szrj   free_deps_list (INSN_SPEC_BACK_DEPS (insn));
812*38fd1498Szrj   INSN_SPEC_BACK_DEPS (insn) = NULL;
813*38fd1498Szrj 
814*38fd1498Szrj   free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
815*38fd1498Szrj   INSN_RESOLVED_BACK_DEPS (insn) = NULL;
816*38fd1498Szrj 
817*38fd1498Szrj   free_deps_list (INSN_FORW_DEPS (insn));
818*38fd1498Szrj   INSN_FORW_DEPS (insn) = NULL;
819*38fd1498Szrj 
820*38fd1498Szrj   free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
821*38fd1498Szrj   INSN_RESOLVED_FORW_DEPS (insn) = NULL;
822*38fd1498Szrj }
823*38fd1498Szrj 
824*38fd1498Szrj /* Find a dependency between producer PRO and consumer CON.
825*38fd1498Szrj    Search through resolved dependency lists if RESOLVED_P is true.
826*38fd1498Szrj    If no such dependency is found return NULL,
827*38fd1498Szrj    otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
828*38fd1498Szrj    with an iterator pointing to it.  */
829*38fd1498Szrj static dep_t
sd_find_dep_between_no_cache(rtx pro,rtx con,bool resolved_p,sd_iterator_def * sd_it_ptr)830*38fd1498Szrj sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
831*38fd1498Szrj 			      sd_iterator_def *sd_it_ptr)
832*38fd1498Szrj {
833*38fd1498Szrj   sd_list_types_def pro_list_type;
834*38fd1498Szrj   sd_list_types_def con_list_type;
835*38fd1498Szrj   sd_iterator_def sd_it;
836*38fd1498Szrj   dep_t dep;
837*38fd1498Szrj   bool found_p = false;
838*38fd1498Szrj 
839*38fd1498Szrj   if (resolved_p)
840*38fd1498Szrj     {
841*38fd1498Szrj       pro_list_type = SD_LIST_RES_FORW;
842*38fd1498Szrj       con_list_type = SD_LIST_RES_BACK;
843*38fd1498Szrj     }
844*38fd1498Szrj   else
845*38fd1498Szrj     {
846*38fd1498Szrj       pro_list_type = SD_LIST_FORW;
847*38fd1498Szrj       con_list_type = SD_LIST_BACK;
848*38fd1498Szrj     }
849*38fd1498Szrj 
850*38fd1498Szrj   /* Walk through either back list of INSN or forw list of ELEM
851*38fd1498Szrj      depending on which one is shorter.  */
852*38fd1498Szrj   if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
853*38fd1498Szrj     {
854*38fd1498Szrj       /* Find the dep_link with producer PRO in consumer's back_deps.  */
855*38fd1498Szrj       FOR_EACH_DEP (con, con_list_type, sd_it, dep)
856*38fd1498Szrj 	if (DEP_PRO (dep) == pro)
857*38fd1498Szrj 	  {
858*38fd1498Szrj 	    found_p = true;
859*38fd1498Szrj 	    break;
860*38fd1498Szrj 	  }
861*38fd1498Szrj     }
862*38fd1498Szrj   else
863*38fd1498Szrj     {
864*38fd1498Szrj       /* Find the dep_link with consumer CON in producer's forw_deps.  */
865*38fd1498Szrj       FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
866*38fd1498Szrj 	if (DEP_CON (dep) == con)
867*38fd1498Szrj 	  {
868*38fd1498Szrj 	    found_p = true;
869*38fd1498Szrj 	    break;
870*38fd1498Szrj 	  }
871*38fd1498Szrj     }
872*38fd1498Szrj 
873*38fd1498Szrj   if (found_p)
874*38fd1498Szrj     {
875*38fd1498Szrj       if (sd_it_ptr != NULL)
876*38fd1498Szrj 	*sd_it_ptr = sd_it;
877*38fd1498Szrj 
878*38fd1498Szrj       return dep;
879*38fd1498Szrj     }
880*38fd1498Szrj 
881*38fd1498Szrj   return NULL;
882*38fd1498Szrj }
883*38fd1498Szrj 
884*38fd1498Szrj /* Find a dependency between producer PRO and consumer CON.
885*38fd1498Szrj    Use dependency [if available] to check if dependency is present at all.
886*38fd1498Szrj    Search through resolved dependency lists if RESOLVED_P is true.
887*38fd1498Szrj    If the dependency or NULL if none found.  */
888*38fd1498Szrj dep_t
sd_find_dep_between(rtx pro,rtx con,bool resolved_p)889*38fd1498Szrj sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
890*38fd1498Szrj {
891*38fd1498Szrj   if (true_dependency_cache != NULL)
892*38fd1498Szrj     /* Avoiding the list walk below can cut compile times dramatically
893*38fd1498Szrj        for some code.  */
894*38fd1498Szrj     {
895*38fd1498Szrj       int elem_luid = INSN_LUID (pro);
896*38fd1498Szrj       int insn_luid = INSN_LUID (con);
897*38fd1498Szrj 
898*38fd1498Szrj       if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
899*38fd1498Szrj 	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
900*38fd1498Szrj 	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
901*38fd1498Szrj 	  && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
902*38fd1498Szrj 	return NULL;
903*38fd1498Szrj     }
904*38fd1498Szrj 
905*38fd1498Szrj   return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
906*38fd1498Szrj }
907*38fd1498Szrj 
908*38fd1498Szrj /* Add or update  a dependence described by DEP.
909*38fd1498Szrj    MEM1 and MEM2, if non-null, correspond to memory locations in case of
910*38fd1498Szrj    data speculation.
911*38fd1498Szrj 
912*38fd1498Szrj    The function returns a value indicating if an old entry has been changed
913*38fd1498Szrj    or a new entry has been added to insn's backward deps.
914*38fd1498Szrj 
915*38fd1498Szrj    This function merely checks if producer and consumer is the same insn
916*38fd1498Szrj    and doesn't create a dep in this case.  Actual manipulation of
917*38fd1498Szrj    dependence data structures is performed in add_or_update_dep_1.  */
918*38fd1498Szrj static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1(dep_t dep,bool resolved_p,rtx mem1,rtx mem2)919*38fd1498Szrj maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
920*38fd1498Szrj {
921*38fd1498Szrj   rtx_insn *elem = DEP_PRO (dep);
922*38fd1498Szrj   rtx_insn *insn = DEP_CON (dep);
923*38fd1498Szrj 
924*38fd1498Szrj   gcc_assert (INSN_P (insn) && INSN_P (elem));
925*38fd1498Szrj 
926*38fd1498Szrj   /* Don't depend an insn on itself.  */
927*38fd1498Szrj   if (insn == elem)
928*38fd1498Szrj     {
929*38fd1498Szrj       if (sched_deps_info->generate_spec_deps)
930*38fd1498Szrj         /* INSN has an internal dependence, which we can't overcome.  */
931*38fd1498Szrj         HAS_INTERNAL_DEP (insn) = 1;
932*38fd1498Szrj 
933*38fd1498Szrj       return DEP_NODEP;
934*38fd1498Szrj     }
935*38fd1498Szrj 
936*38fd1498Szrj   return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
937*38fd1498Szrj }
938*38fd1498Szrj 
/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  /* Caches are indexed by the consumer's LUID; each bitmap row records
     the LUIDs of producers the consumer depends on.  */
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
	      && output_dependency_cache != NULL
	      && anti_dependency_cache != NULL
	      && control_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      /* Without USE_DEPS_LIST at most one type is relevant; probe the
	 caches in order true > output > anti > control, so we report
	 the strongest dependence already recorded.  */
      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_ANTI;
      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_CONTROL;
      else
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
	/* DEP does not add anything to the existing dependence.  */
	return DEP_PRESENT;
    }
  else
    {
      /* With USE_DEPS_LIST several dependence types may coexist; gather
	 them all into a status mask.  */
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_ANTI;
      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_CONTROL;

      if (present_dep_types == 0)
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
	{
	  /* The recorded dependence is non-speculative.  */
	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
	      == present_dep_types)
	    /* DEP does not add anything to the existing dependence.  */
	    return DEP_PRESENT;
	}
      else
	{
	  /* Only true dependencies can be data speculative and
	     only anti dependencies can be control speculative.  */
	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
		      == present_dep_types);

	  /* if (DEP is SPECULATIVE) then
	     ..we should update DEP_STATUS
	     else
	     ..we should reset existing dep to non-speculative.  */
	}
    }

  return DEP_CHANGED;
}
1018*38fd1498Szrj 
1019*38fd1498Szrj /* Set dependency caches according to DEP.  */
1020*38fd1498Szrj static void
set_dependency_caches(dep_t dep)1021*38fd1498Szrj set_dependency_caches (dep_t dep)
1022*38fd1498Szrj {
1023*38fd1498Szrj   int elem_luid = INSN_LUID (DEP_PRO (dep));
1024*38fd1498Szrj   int insn_luid = INSN_LUID (DEP_CON (dep));
1025*38fd1498Szrj 
1026*38fd1498Szrj   if (!(current_sched_info->flags & USE_DEPS_LIST))
1027*38fd1498Szrj     {
1028*38fd1498Szrj       switch (DEP_TYPE (dep))
1029*38fd1498Szrj 	{
1030*38fd1498Szrj 	case REG_DEP_TRUE:
1031*38fd1498Szrj 	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1032*38fd1498Szrj 	  break;
1033*38fd1498Szrj 
1034*38fd1498Szrj 	case REG_DEP_OUTPUT:
1035*38fd1498Szrj 	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1036*38fd1498Szrj 	  break;
1037*38fd1498Szrj 
1038*38fd1498Szrj 	case REG_DEP_ANTI:
1039*38fd1498Szrj 	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1040*38fd1498Szrj 	  break;
1041*38fd1498Szrj 
1042*38fd1498Szrj 	case REG_DEP_CONTROL:
1043*38fd1498Szrj 	  bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1044*38fd1498Szrj 	  break;
1045*38fd1498Szrj 
1046*38fd1498Szrj 	default:
1047*38fd1498Szrj 	  gcc_unreachable ();
1048*38fd1498Szrj 	}
1049*38fd1498Szrj     }
1050*38fd1498Szrj   else
1051*38fd1498Szrj     {
1052*38fd1498Szrj       ds_t ds = DEP_STATUS (dep);
1053*38fd1498Szrj 
1054*38fd1498Szrj       if (ds & DEP_TRUE)
1055*38fd1498Szrj 	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1056*38fd1498Szrj       if (ds & DEP_OUTPUT)
1057*38fd1498Szrj 	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1058*38fd1498Szrj       if (ds & DEP_ANTI)
1059*38fd1498Szrj 	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1060*38fd1498Szrj       if (ds & DEP_CONTROL)
1061*38fd1498Szrj 	bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1062*38fd1498Szrj 
1063*38fd1498Szrj       if (ds & SPECULATIVE)
1064*38fd1498Szrj 	{
1065*38fd1498Szrj 	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
1066*38fd1498Szrj 	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
1067*38fd1498Szrj 	}
1068*38fd1498Szrj     }
1069*38fd1498Szrj }
1070*38fd1498Szrj 
1071*38fd1498Szrj /* Type of dependence DEP have changed from OLD_TYPE.  Update dependency
1072*38fd1498Szrj    caches accordingly.  */
1073*38fd1498Szrj static void
update_dependency_caches(dep_t dep,enum reg_note old_type)1074*38fd1498Szrj update_dependency_caches (dep_t dep, enum reg_note old_type)
1075*38fd1498Szrj {
1076*38fd1498Szrj   int elem_luid = INSN_LUID (DEP_PRO (dep));
1077*38fd1498Szrj   int insn_luid = INSN_LUID (DEP_CON (dep));
1078*38fd1498Szrj 
1079*38fd1498Szrj   /* Clear corresponding cache entry because type of the link
1080*38fd1498Szrj      may have changed.  Keep them if we use_deps_list.  */
1081*38fd1498Szrj   if (!(current_sched_info->flags & USE_DEPS_LIST))
1082*38fd1498Szrj     {
1083*38fd1498Szrj       switch (old_type)
1084*38fd1498Szrj 	{
1085*38fd1498Szrj 	case REG_DEP_OUTPUT:
1086*38fd1498Szrj 	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1087*38fd1498Szrj 	  break;
1088*38fd1498Szrj 
1089*38fd1498Szrj 	case REG_DEP_ANTI:
1090*38fd1498Szrj 	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1091*38fd1498Szrj 	  break;
1092*38fd1498Szrj 
1093*38fd1498Szrj 	case REG_DEP_CONTROL:
1094*38fd1498Szrj 	  bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1095*38fd1498Szrj 	  break;
1096*38fd1498Szrj 
1097*38fd1498Szrj 	default:
1098*38fd1498Szrj 	  gcc_unreachable ();
1099*38fd1498Szrj 	}
1100*38fd1498Szrj     }
1101*38fd1498Szrj 
1102*38fd1498Szrj   set_dependency_caches (dep);
1103*38fd1498Szrj }
1104*38fd1498Szrj 
1105*38fd1498Szrj /* Convert a dependence pointed to by SD_IT to be non-speculative.  */
1106*38fd1498Szrj static void
change_spec_dep_to_hard(sd_iterator_def sd_it)1107*38fd1498Szrj change_spec_dep_to_hard (sd_iterator_def sd_it)
1108*38fd1498Szrj {
1109*38fd1498Szrj   dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1110*38fd1498Szrj   dep_link_t link = DEP_NODE_BACK (node);
1111*38fd1498Szrj   dep_t dep = DEP_NODE_DEP (node);
1112*38fd1498Szrj   rtx_insn *elem = DEP_PRO (dep);
1113*38fd1498Szrj   rtx_insn *insn = DEP_CON (dep);
1114*38fd1498Szrj 
1115*38fd1498Szrj   move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
1116*38fd1498Szrj 
1117*38fd1498Szrj   DEP_STATUS (dep) &= ~SPECULATIVE;
1118*38fd1498Szrj 
1119*38fd1498Szrj   if (true_dependency_cache != NULL)
1120*38fd1498Szrj     /* Clear the cache entry.  */
1121*38fd1498Szrj     bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
1122*38fd1498Szrj 		      INSN_LUID (elem));
1123*38fd1498Szrj }
1124*38fd1498Szrj 
/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case if
   data-speculative dependence should be updated.
   Returns DEP_PRESENT if DEP already subsumed NEW_DEP, DEP_CHANGED if
   DEP's type or status was updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
	    rtx mem1 ATTRIBUTE_UNUSED,
	    rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  /* Remember whether DEP was speculative before the merge, so we can
     detect a speculative->hard transition at the end.  */
  bool was_spec = dep_spec_p (dep);

  /* The dependence now has (at least) two distinct origins.  */
  DEP_NONREG (dep) |= DEP_NONREG (new_dep);
  DEP_MULTIPLE (dep) = 1;

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	{
	  /* Either existing dep or a dep we're adding or both are
	     speculative.  */
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    new_status &= ~SPECULATIVE;
	  else
	    {
	      /* Both are speculative.  Merge probabilities.  */
	      if (mem1 != NULL)
		{
		  dw_t dw;

		  /* Refine the data-speculation weakness estimate from
		     the two memory references.  */
		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
		}

	      new_status = ds_merge (dep_status, ds);
	    }
	}

      ds = new_status;

      if (dep_status != ds)
	{
	  DEP_STATUS (dep) = ds;
	  res = DEP_CHANGED;
	}
    }

  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  Move it from the
       consumer's speculative back-deps list to the hard one.  */
    change_spec_dep_to_hard (sd_it);

  /* Only DEP_CHANGED requires refreshing the bitmap caches.  */
  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}
1200*38fd1498Szrj 
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  /* MAYBE_PRESENT_P: a matching dep may already be on the lists and must
     be searched for.  PRESENT_P: the caches say a matching dep exists.  */
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

  if (flag_checking)
    check_dep (new_dep, mem1 != NULL);

  /* Consult the caches first to avoid walking the lists if possible.  */
  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
	{
	case DEP_PRESENT:
	  dep_t present_dep;
	  sd_iterator_def sd_it;

	  /* The existing dep fully subsumes NEW_DEP; just mark it as
	     having multiple origins.  */
	  present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						      DEP_CON (new_dep),
						      resolved_p, &sd_it);
	  DEP_MULTIPLE (present_dep) = 1;
	  return DEP_PRESENT;

	case DEP_CHANGED:
	  maybe_present_p = true;
	  present_p = true;
	  break;

	case DEP_CREATED:
	  maybe_present_p = false;
	  present_p = false;
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
	/* We didn't find a dep, so it shouldn't be present in the cache.  */
	gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      /* MEM1/MEM2 imply a data-speculative dep; seed its weakness.  */
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}
1287*38fd1498Szrj 
1288*38fd1498Szrj /* Initialize BACK_LIST_PTR with consumer's backward list and
1289*38fd1498Szrj    FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
1290*38fd1498Szrj    initialize with lists that hold resolved deps.  */
1291*38fd1498Szrj static void
get_back_and_forw_lists(dep_t dep,bool resolved_p,deps_list_t * back_list_ptr,deps_list_t * forw_list_ptr)1292*38fd1498Szrj get_back_and_forw_lists (dep_t dep, bool resolved_p,
1293*38fd1498Szrj 			 deps_list_t *back_list_ptr,
1294*38fd1498Szrj 			 deps_list_t *forw_list_ptr)
1295*38fd1498Szrj {
1296*38fd1498Szrj   rtx_insn *con = DEP_CON (dep);
1297*38fd1498Szrj 
1298*38fd1498Szrj   if (!resolved_p)
1299*38fd1498Szrj     {
1300*38fd1498Szrj       if (dep_spec_p (dep))
1301*38fd1498Szrj 	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1302*38fd1498Szrj       else
1303*38fd1498Szrj 	*back_list_ptr = INSN_HARD_BACK_DEPS (con);
1304*38fd1498Szrj 
1305*38fd1498Szrj       *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1306*38fd1498Szrj     }
1307*38fd1498Szrj   else
1308*38fd1498Szrj     {
1309*38fd1498Szrj       *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1310*38fd1498Szrj       *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1311*38fd1498Szrj     }
1312*38fd1498Szrj }
1313*38fd1498Szrj 
1314*38fd1498Szrj /* Add dependence described by DEP.
1315*38fd1498Szrj    If RESOLVED_P is true treat the dependence as a resolved one.  */
1316*38fd1498Szrj void
sd_add_dep(dep_t dep,bool resolved_p)1317*38fd1498Szrj sd_add_dep (dep_t dep, bool resolved_p)
1318*38fd1498Szrj {
1319*38fd1498Szrj   dep_node_t n = create_dep_node ();
1320*38fd1498Szrj   deps_list_t con_back_deps;
1321*38fd1498Szrj   deps_list_t pro_forw_deps;
1322*38fd1498Szrj   rtx_insn *elem = DEP_PRO (dep);
1323*38fd1498Szrj   rtx_insn *insn = DEP_CON (dep);
1324*38fd1498Szrj 
1325*38fd1498Szrj   gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1326*38fd1498Szrj 
1327*38fd1498Szrj   if ((current_sched_info->flags & DO_SPECULATION) == 0
1328*38fd1498Szrj       || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
1329*38fd1498Szrj     DEP_STATUS (dep) &= ~SPECULATIVE;
1330*38fd1498Szrj 
1331*38fd1498Szrj   copy_dep (DEP_NODE_DEP (n), dep);
1332*38fd1498Szrj 
1333*38fd1498Szrj   get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
1334*38fd1498Szrj 
1335*38fd1498Szrj   add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
1336*38fd1498Szrj 
1337*38fd1498Szrj   if (flag_checking)
1338*38fd1498Szrj     check_dep (dep, false);
1339*38fd1498Szrj 
1340*38fd1498Szrj   add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1341*38fd1498Szrj 
1342*38fd1498Szrj   /* If we are adding a dependency to INSN's LOG_LINKs, then note that
1343*38fd1498Szrj      in the bitmap caches of dependency information.  */
1344*38fd1498Szrj   if (true_dependency_cache != NULL)
1345*38fd1498Szrj     set_dependency_caches (dep);
1346*38fd1498Szrj }
1347*38fd1498Szrj 
/* Add or update backward dependence between INSN and ELEM
   with given type DEP_TYPE and dep_status DS.
   This function is a convenience wrapper: equivalent to
   add_or_update_dep_1 with no memory locations, i.e. without
   data-speculation weakness merging.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}
1356*38fd1498Szrj 
1357*38fd1498Szrj /* Resolved dependence pointed to by SD_IT.
1358*38fd1498Szrj    SD_IT will advance to the next element.  */
1359*38fd1498Szrj void
sd_resolve_dep(sd_iterator_def sd_it)1360*38fd1498Szrj sd_resolve_dep (sd_iterator_def sd_it)
1361*38fd1498Szrj {
1362*38fd1498Szrj   dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1363*38fd1498Szrj   dep_t dep = DEP_NODE_DEP (node);
1364*38fd1498Szrj   rtx_insn *pro = DEP_PRO (dep);
1365*38fd1498Szrj   rtx_insn *con = DEP_CON (dep);
1366*38fd1498Szrj 
1367*38fd1498Szrj   if (dep_spec_p (dep))
1368*38fd1498Szrj     move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
1369*38fd1498Szrj 		   INSN_RESOLVED_BACK_DEPS (con));
1370*38fd1498Szrj   else
1371*38fd1498Szrj     move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
1372*38fd1498Szrj 		   INSN_RESOLVED_BACK_DEPS (con));
1373*38fd1498Szrj 
1374*38fd1498Szrj   move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
1375*38fd1498Szrj 		 INSN_RESOLVED_FORW_DEPS (pro));
1376*38fd1498Szrj }
1377*38fd1498Szrj 
/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state, moving it from the "resolved"
   lists back onto the active back/forward dependency lists.  */
void
sd_unresolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  /* Return the back dependence to the list it originally came from:
     speculative deps and hard deps are kept on separate lists.  */
  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_SPEC_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_HARD_BACK_DEPS (con));

  /* Mirror the move on the producer's forward list.  */
  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
		 INSN_FORW_DEPS (pro));
}
1398*38fd1498Szrj 
/* Make TO depend on all the FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      /* Copy the dependence into a stack temporary, retarget its
	 consumer to TO, then register it; sd_add_dep makes the
	 permanent copy.  */
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}
1419*38fd1498Szrj 
/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  /* Keep the dependency caches in sync: clear every kind of bit for
     this (producer, consumer) pair.  */
  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      /* The speculation cache only exists when speculation is on.  */
      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  /* Unlink the node from the consumer's backward list and the
     producer's forward list, then release it.  */
  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}
1454*38fd1498Szrj 
1455*38fd1498Szrj /* Dump size of the lists.  */
1456*38fd1498Szrj #define DUMP_LISTS_SIZE (2)
1457*38fd1498Szrj 
1458*38fd1498Szrj /* Dump dependencies of the lists.  */
1459*38fd1498Szrj #define DUMP_LISTS_DEPS (4)
1460*38fd1498Szrj 
1461*38fd1498Szrj /* Dump all information about the lists.  */
1462*38fd1498Szrj #define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1463*38fd1498Szrj 
1464*38fd1498Szrj /* Dump deps_lists of INSN specified by TYPES to DUMP.
1465*38fd1498Szrj    FLAGS is a bit mask specifying what information about the lists needs
1466*38fd1498Szrj    to be printed.
1467*38fd1498Szrj    If FLAGS has the very first bit set, then dump all information about
1468*38fd1498Szrj    the lists and propagate this bit into the callee dump functions.  */
1469*38fd1498Szrj static void
dump_lists(FILE * dump,rtx insn,sd_list_types_def types,int flags)1470*38fd1498Szrj dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
1471*38fd1498Szrj {
1472*38fd1498Szrj   sd_iterator_def sd_it;
1473*38fd1498Szrj   dep_t dep;
1474*38fd1498Szrj   int all;
1475*38fd1498Szrj 
1476*38fd1498Szrj   all = (flags & 1);
1477*38fd1498Szrj 
1478*38fd1498Szrj   if (all)
1479*38fd1498Szrj     flags |= DUMP_LISTS_ALL;
1480*38fd1498Szrj 
1481*38fd1498Szrj   fprintf (dump, "[");
1482*38fd1498Szrj 
1483*38fd1498Szrj   if (flags & DUMP_LISTS_SIZE)
1484*38fd1498Szrj     fprintf (dump, "%d; ", sd_lists_size (insn, types));
1485*38fd1498Szrj 
1486*38fd1498Szrj   if (flags & DUMP_LISTS_DEPS)
1487*38fd1498Szrj     {
1488*38fd1498Szrj       FOR_EACH_DEP (insn, types, sd_it, dep)
1489*38fd1498Szrj 	{
1490*38fd1498Szrj 	  dump_dep (dump, dep, dump_dep_flags | all);
1491*38fd1498Szrj 	  fprintf (dump, " ");
1492*38fd1498Szrj 	}
1493*38fd1498Szrj     }
1494*38fd1498Szrj }
1495*38fd1498Szrj 
1496*38fd1498Szrj /* Dump all information about deps_lists of INSN specified by TYPES
1497*38fd1498Szrj    to STDERR.  */
1498*38fd1498Szrj void
sd_debug_lists(rtx insn,sd_list_types_def types)1499*38fd1498Szrj sd_debug_lists (rtx insn, sd_list_types_def types)
1500*38fd1498Szrj {
1501*38fd1498Szrj   dump_lists (stderr, insn, types, 1);
1502*38fd1498Szrj   fprintf (stderr, "\n");
1503*38fd1498Szrj }
1504*38fd1498Szrj 
/* A wrapper around add_dependence_1, to add a dependence of CON on
   PRO, with type DEP_TYPE.  This function implements special handling
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
   the type to REG_DEP_ANTI if we can determine that predication is
   impossible; otherwise we add additional true dependencies on the
   INSN_COND_DEPS list of the jump (which PRO must be).  */
void
add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
{
  /* Without predication support, a control dependence degenerates to
     an anti dependence on the jump.  */
  if (dep_type == REG_DEP_CONTROL
      && !(current_sched_info->flags & DO_PREDICATION))
    dep_type = REG_DEP_ANTI;

  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
     so we must also make the insn dependent on the setter of the
     condition.  */
  if (dep_type == REG_DEP_CONTROL)
    {
      rtx_insn *real_pro = pro;
      rtx_insn *other = real_insn_for_shadow (real_pro);
      rtx cond;

      /* If PRO is a shadow, examine the real insn instead.  */
      if (other != NULL_RTX)
	real_pro = other;
      cond = sched_get_reverse_condition_uncached (real_pro);
      /* Verify that the insn does not use a different value in
	 the condition register than the one that was present at
	 the jump.  */
      if (cond == NULL_RTX)
	dep_type = REG_DEP_ANTI;
      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
	{
	  HARD_REG_SET uses;
	  CLEAR_HARD_REG_SET (uses);
	  note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
	  /* CON reads the condition register itself -> predication
	     is impossible, demote to anti dependence.  */
	  if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
	    dep_type = REG_DEP_ANTI;
	}
      if (dep_type == REG_DEP_CONTROL)
	{
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "making DEP_CONTROL for %d\n",
		     INSN_UID (real_pro));
	  /* Keep the control dependence predicable by additionally
	     depending (as true deps) on the condition's setters.  */
	  add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
			       REG_DEP_TRUE, false);
	}
    }

  add_dependence_1 (con, pro, dep_type);
}
1555*38fd1498Szrj 
1556*38fd1498Szrj /* A convenience wrapper to operate on an entire list.  HARD should be
1557*38fd1498Szrj    true if DEP_NONREG should be set on newly created dependencies.  */
1558*38fd1498Szrj 
1559*38fd1498Szrj static void
add_dependence_list(rtx_insn * insn,rtx_insn_list * list,int uncond,enum reg_note dep_type,bool hard)1560*38fd1498Szrj add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
1561*38fd1498Szrj 		     enum reg_note dep_type, bool hard)
1562*38fd1498Szrj {
1563*38fd1498Szrj   mark_as_hard = hard;
1564*38fd1498Szrj   for (; list; list = list->next ())
1565*38fd1498Szrj     {
1566*38fd1498Szrj       if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
1567*38fd1498Szrj 	add_dependence (insn, list->insn (), dep_type);
1568*38fd1498Szrj     }
1569*38fd1498Szrj   mark_as_hard = false;
1570*38fd1498Szrj }
1571*38fd1498Szrj 
1572*38fd1498Szrj /* Similar, but free *LISTP at the same time, when the context
1573*38fd1498Szrj    is not readonly.  HARD should be true if DEP_NONREG should be set on
1574*38fd1498Szrj    newly created dependencies.  */
1575*38fd1498Szrj 
1576*38fd1498Szrj static void
add_dependence_list_and_free(struct deps_desc * deps,rtx_insn * insn,rtx_insn_list ** listp,int uncond,enum reg_note dep_type,bool hard)1577*38fd1498Szrj add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
1578*38fd1498Szrj 			      rtx_insn_list **listp,
1579*38fd1498Szrj                               int uncond, enum reg_note dep_type, bool hard)
1580*38fd1498Szrj {
1581*38fd1498Szrj   add_dependence_list (insn, *listp, uncond, dep_type, hard);
1582*38fd1498Szrj 
1583*38fd1498Szrj   /* We don't want to short-circuit dependencies involving debug
1584*38fd1498Szrj      insns, because they may cause actual dependencies to be
1585*38fd1498Szrj      disregarded.  */
1586*38fd1498Szrj   if (deps->readonly || DEBUG_INSN_P (insn))
1587*38fd1498Szrj     return;
1588*38fd1498Szrj 
1589*38fd1498Szrj   free_INSN_LIST_list (listp);
1590*38fd1498Szrj }
1591*38fd1498Szrj 
1592*38fd1498Szrj /* Remove all occurrences of INSN from LIST.  Return the number of
1593*38fd1498Szrj    occurrences removed.  */
1594*38fd1498Szrj 
1595*38fd1498Szrj static int
remove_from_dependence_list(rtx_insn * insn,rtx_insn_list ** listp)1596*38fd1498Szrj remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
1597*38fd1498Szrj {
1598*38fd1498Szrj   int removed = 0;
1599*38fd1498Szrj 
1600*38fd1498Szrj   while (*listp)
1601*38fd1498Szrj     {
1602*38fd1498Szrj       if ((*listp)->insn () == insn)
1603*38fd1498Szrj         {
1604*38fd1498Szrj           remove_free_INSN_LIST_node (listp);
1605*38fd1498Szrj           removed++;
1606*38fd1498Szrj           continue;
1607*38fd1498Szrj         }
1608*38fd1498Szrj 
1609*38fd1498Szrj       listp = (rtx_insn_list **)&XEXP (*listp, 1);
1610*38fd1498Szrj     }
1611*38fd1498Szrj 
1612*38fd1498Szrj   return removed;
1613*38fd1498Szrj }
1614*38fd1498Szrj 
1615*38fd1498Szrj /* Same as above, but process two lists at once.  */
1616*38fd1498Szrj static int
remove_from_both_dependence_lists(rtx_insn * insn,rtx_insn_list ** listp,rtx_expr_list ** exprp)1617*38fd1498Szrj remove_from_both_dependence_lists (rtx_insn *insn,
1618*38fd1498Szrj 				   rtx_insn_list **listp,
1619*38fd1498Szrj 				   rtx_expr_list **exprp)
1620*38fd1498Szrj {
1621*38fd1498Szrj   int removed = 0;
1622*38fd1498Szrj 
1623*38fd1498Szrj   while (*listp)
1624*38fd1498Szrj     {
1625*38fd1498Szrj       if (XEXP (*listp, 0) == insn)
1626*38fd1498Szrj         {
1627*38fd1498Szrj           remove_free_INSN_LIST_node (listp);
1628*38fd1498Szrj           remove_free_EXPR_LIST_node (exprp);
1629*38fd1498Szrj           removed++;
1630*38fd1498Szrj           continue;
1631*38fd1498Szrj         }
1632*38fd1498Szrj 
1633*38fd1498Szrj       listp = (rtx_insn_list **)&XEXP (*listp, 1);
1634*38fd1498Szrj       exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
1635*38fd1498Szrj     }
1636*38fd1498Szrj 
1637*38fd1498Szrj   return removed;
1638*38fd1498Szrj }
1639*38fd1498Szrj 
/* Clear all backward dependencies for INSN (each deletion also unlinks
   the matching forward dependence from the producer).  */
static void
delete_all_dependences (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The below cycle can be optimized to clear the caches and back_deps
     in one call but that would provoke duplication of code from
     delete_dep ().  */

  /* No explicit iterator advance: sd_delete_dep leaves SD_IT pointing
     at the next dependence.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}
1655*38fd1498Szrj 
/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards. Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
chain_to_prev_insn (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx_insn *prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *i = insn;
      rtx_insn *pro = DEP_PRO (dep);

      /* Walk backwards over the group members (and any debug insns
	 mixed in).  If the producer is itself inside the group, the
	 dependence needs no retargeting.  */
      do
	{
	  i = prev_nonnote_insn (i);

	  if (pro == i)
	    goto next_link;
	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      /* Re-attach the dependence to the first insn of the group.  */
      if (! sched_insns_conditions_mutex_p (i, pro))
	add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  /* INSN's own dependencies have been transferred; drop them.  */
  delete_all_dependences (insn);

  /* Chain the group member to its predecessor so the two stay
     adjacent during scheduling.  */
  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
1694*38fd1498Szrj 
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */

/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  READ_P selects the pending
   read lists vs. the pending write lists of DEPS.  */

static void
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
			 rtx_insn *insn, rtx mem)
{
  rtx_insn_list **insn_list;
  rtx_insn_list *insn_node;
  rtx_expr_list **mem_list;
  rtx_expr_list *mem_node;

  gcc_assert (!deps->readonly);
  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      /* Debug insns are not counted against the list-length limit.  */
      if (!DEBUG_INSN_P (insn))
	deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  /* Push INSN onto the front of the selected pending list.  */
  insn_node = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = insn_node;

  if (sched_deps_info->use_cselib)
    {
      /* Copy the MEM before rewriting its address so the original
	 insn is left untouched.  */
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
							GET_MODE (mem), insn);
    }
  mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = mem_node;
}
1746*38fd1498Szrj 
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
		     int for_write)
{
  if (for_write)
    {
      /* A write must wait for all pending reads (anti dependencies);
	 the matching MEM list can then be emptied too.  */
      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
                                    1, REG_DEP_ANTI, true);
      if (!deps->readonly)
        {
          free_EXPR_LIST_list (&deps->pending_read_mems);
          deps->pending_read_list_length = 0;
        }
    }

  /* Against pending writes: a read takes an anti dep, a write an
     output dep.  */
  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
				true);

  add_dependence_list_and_free (deps, insn,
                                &deps->last_pending_memory_flush, 1,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
				true);

  add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
				REG_DEP_ANTI, true);

  /* add_dependence_list_and_free does not free lists for debug insns;
     drop them explicitly here.  */
  if (DEBUG_INSN_P (insn))
    {
      if (for_write)
	free_INSN_LIST_list (&deps->pending_read_insns);
      free_INSN_LIST_list (&deps->pending_write_insns);
      free_INSN_LIST_list (&deps->last_pending_memory_flush);
      free_INSN_LIST_list (&deps->pending_jump_insns);
    }

  if (!deps->readonly)
    {
      free_EXPR_LIST_list (&deps->pending_write_mems);
      deps->pending_write_list_length = 0;

      /* INSN itself becomes the barrier every later memory access must
	 depend on.  */
      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
      deps->pending_flush_length = 1;
    }
  mark_as_hard = false;
}
1797*38fd1498Szrj 
/* Instruction whose dependencies we are currently analyzing; set by
   haifa_start_insn, cleared by haifa_finish_insn.  */
static rtx_insn *cur_insn = NULL;

/* Implement hooks for haifa scheduler.  */

/* Begin dependency analysis of INSN.  Nesting is not allowed: the
   previous analysis must have been finished first.  */
static void
haifa_start_insn (rtx_insn *insn)
{
  gcc_assert (insn && !cur_insn);

  cur_insn = insn;
}

/* Finish dependency analysis of the current insn.  */
static void
haifa_finish_insn (void)
{
  cur_insn = NULL;
}
1816*38fd1498Szrj 
/* Haifa hook: record that the current insn sets register REGNO.  */
void
haifa_note_reg_set (int regno)
{
  SET_REGNO_REG_SET (reg_pending_sets, regno);
}

/* Haifa hook: record that the current insn clobbers register REGNO.  */
void
haifa_note_reg_clobber (int regno)
{
  SET_REGNO_REG_SET (reg_pending_clobbers, regno);
}

/* Haifa hook: record that the current insn uses register REGNO.  */
void
haifa_note_reg_use (int regno)
{
  SET_REGNO_REG_SET (reg_pending_uses, regno);
}
1834*38fd1498Szrj 
/* Haifa hook: note a memory dependence of the current insn (CUR_INSN)
   on PENDING_INSN.  MEM and PENDING_MEM are the memory references
   involved; DS is the dependence status.  */
static void
haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
{
  /* The MEMs are only kept for speculative dependencies; drop them
     otherwise.  */
  if (!(ds & SPECULATIVE))
    {
      mem = NULL_RTX;
      pending_mem = NULL_RTX;
    }
  else
    gcc_assert (ds & BEGIN_DATA);

  {
    dep_def _dep, *dep = &_dep;

    /* Only record the full status when the target keeps statuses on
       its dep lists.  */
    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
                current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
    /* Memory dependencies are not register dependencies.  */
    DEP_NONREG (dep) = 1;
    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
  }

}
1856*38fd1498Szrj 
/* Haifa hook: note a (non-memory) dependence of the current insn
   (CUR_INSN) on ELEM with dependence status DS.  */
static void
haifa_note_dep (rtx_insn *elem, ds_t ds)
{
  dep_def _dep;
  dep_t dep = &_dep;

  init_dep (dep, elem, cur_insn, ds_to_dt (ds));
  /* mark_as_hard is set by add_dependence_list and friends when
     DEP_NONREG should be propagated to new dependencies.  */
  if (mark_as_hard)
    DEP_NONREG (dep) = 1;
  maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
}
1868*38fd1498Szrj 
/* Dispatch wrappers: forward each event to the corresponding optional
   sched_deps_info callback, if the current client installed one.  */

/* Note a use of register R.  */
static void
note_reg_use (int r)
{
  if (sched_deps_info->note_reg_use)
    sched_deps_info->note_reg_use (r);
}

/* Note a set of register R.  */
static void
note_reg_set (int r)
{
  if (sched_deps_info->note_reg_set)
    sched_deps_info->note_reg_set (r);
}

/* Note a clobber of register R.  */
static void
note_reg_clobber (int r)
{
  if (sched_deps_info->note_reg_clobber)
    sched_deps_info->note_reg_clobber (r);
}

/* Note a memory dependence on insn E with MEMs M1 and M2 and status DS.  */
static void
note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
{
  if (sched_deps_info->note_mem_dep)
    sched_deps_info->note_mem_dep (m1, m2, e, ds);
}

/* Note a non-memory dependence on insn E with status DS.  */
static void
note_dep (rtx_insn *e, ds_t ds)
{
  if (sched_deps_info->note_dep)
    sched_deps_info->note_dep (e, ds);
}
1903*38fd1498Szrj 
1904*38fd1498Szrj /* Return corresponding to DS reg_note.  */
1905*38fd1498Szrj enum reg_note
ds_to_dt(ds_t ds)1906*38fd1498Szrj ds_to_dt (ds_t ds)
1907*38fd1498Szrj {
1908*38fd1498Szrj   if (ds & DEP_TRUE)
1909*38fd1498Szrj     return REG_DEP_TRUE;
1910*38fd1498Szrj   else if (ds & DEP_OUTPUT)
1911*38fd1498Szrj     return REG_DEP_OUTPUT;
1912*38fd1498Szrj   else if (ds & DEP_ANTI)
1913*38fd1498Szrj     return REG_DEP_ANTI;
1914*38fd1498Szrj   else
1915*38fd1498Szrj     {
1916*38fd1498Szrj       gcc_assert (ds & DEP_CONTROL);
1917*38fd1498Szrj       return REG_DEP_CONTROL;
1918*38fd1498Szrj     }
1919*38fd1498Szrj }
1920*38fd1498Szrj 
1921*38fd1498Szrj 
1922*38fd1498Szrj 
1923*38fd1498Szrj /* Functions for computation of info needed for register pressure
1924*38fd1498Szrj    sensitive insn scheduling.  */
1925*38fd1498Szrj 
1926*38fd1498Szrj 
1927*38fd1498Szrj /* Allocate and return reg_use_data structure for REGNO and INSN.  */
1928*38fd1498Szrj static struct reg_use_data *
create_insn_reg_use(int regno,rtx_insn * insn)1929*38fd1498Szrj create_insn_reg_use (int regno, rtx_insn *insn)
1930*38fd1498Szrj {
1931*38fd1498Szrj   struct reg_use_data *use;
1932*38fd1498Szrj 
1933*38fd1498Szrj   use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1934*38fd1498Szrj   use->regno = regno;
1935*38fd1498Szrj   use->insn = insn;
1936*38fd1498Szrj   use->next_insn_use = INSN_REG_USE_LIST (insn);
1937*38fd1498Szrj   INSN_REG_USE_LIST (insn) = use;
1938*38fd1498Szrj   return use;
1939*38fd1498Szrj }
1940*38fd1498Szrj 
1941*38fd1498Szrj /* Allocate reg_set_data structure for REGNO and INSN.  */
1942*38fd1498Szrj static void
create_insn_reg_set(int regno,rtx insn)1943*38fd1498Szrj create_insn_reg_set (int regno, rtx insn)
1944*38fd1498Szrj {
1945*38fd1498Szrj   struct reg_set_data *set;
1946*38fd1498Szrj 
1947*38fd1498Szrj   set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1948*38fd1498Szrj   set->regno = regno;
1949*38fd1498Szrj   set->insn = insn;
1950*38fd1498Szrj   set->next_insn_set = INSN_REG_SET_LIST (insn);
1951*38fd1498Szrj   INSN_REG_SET_LIST (insn) = set;
1952*38fd1498Szrj }
1953*38fd1498Szrj 
/* Set up insn register uses for INSN and dependency context DEPS.  */
static void
setup_insn_reg_uses (struct deps_desc *deps, rtx_insn *insn)
{
  unsigned i;
  reg_set_iterator rsi;
  struct reg_use_data *use, *use2, *next;
  struct deps_reg *reg_last;

  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
    {
      /* Skip hard registers the allocator cannot use.  */
      if (i < FIRST_PSEUDO_REGISTER
	  && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
	continue;

      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
	  && ! REGNO_REG_SET_P (reg_pending_sets, i)
	  && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
	/* Ignore use which is not dying.  */
	continue;

      /* Start a singleton circular list for this regno.  */
      use = create_insn_reg_use (i, insn);
      use->next_regno_use = use;
      reg_last = &deps->reg_last[i];

      /* Create the cycle list of uses.  Each earlier user of this
	 register is spliced in after USE.  */
      for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
	{
	  use2 = create_insn_reg_use (i, list->insn ());
	  next = use->next_regno_use;
	  use->next_regno_use = use2;
	  use2->next_regno_use = next;
	}
    }
}
1989*38fd1498Szrj 
/* Register pressure info, indexed by pressure class, for the currently
   processed insn; updated by the mark_insn_* routines below.  */
static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1992*38fd1498Szrj 
1993*38fd1498Szrj /* Return TRUE if INSN has the use structure for REGNO.  */
1994*38fd1498Szrj static bool
insn_use_p(rtx insn,int regno)1995*38fd1498Szrj insn_use_p (rtx insn, int regno)
1996*38fd1498Szrj {
1997*38fd1498Szrj   struct reg_use_data *use;
1998*38fd1498Szrj 
1999*38fd1498Szrj   for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2000*38fd1498Szrj     if (use->regno == regno)
2001*38fd1498Szrj       return true;
2002*38fd1498Szrj   return false;
2003*38fd1498Szrj }
2004*38fd1498Szrj 
/* Update the register pressure info after birth of pseudo register REGNO
   in INSN.  Arguments CLOBBER_P and UNUSED_P say correspondingly that
   the register is in clobber or unused after the insn.  */
static void
mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
{
  int incr, new_incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_pressure_class[regno];
  if (cl != NO_REGS)
    {
      /* A pseudo may occupy several hard registers of its class.  */
      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      if (clobber_p)
	{
	  new_incr = reg_pressure_info[cl].clobber_increase + incr;
	  reg_pressure_info[cl].clobber_increase = new_incr;
	}
      else if (unused_p)
	{
	  new_incr = reg_pressure_info[cl].unused_set_increase + incr;
	  reg_pressure_info[cl].unused_set_increase = new_incr;
	}
      else
	{
	  new_incr = reg_pressure_info[cl].set_increase + incr;
	  reg_pressure_info[cl].set_increase = new_incr;
	  /* A genuine set raises pressure only if the insn does not
	     also consume (kill) the register.  */
	  if (! insn_use_p (insn, regno))
	    reg_pressure_info[cl].change += incr;
	  create_insn_reg_set (regno, insn);
	}
      /* The increase fields are bitfields of INCREASE_BITS width.  */
      gcc_assert (new_incr < (1 << INCREASE_BITS));
    }
}
2040*38fd1498Szrj 
/* Like mark_insn_pseudo_regno_birth except that NREGS saying how many
   hard registers involved in the birth.  */
static void
mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
			    bool clobber_p, bool unused_p)
{
  enum reg_class cl;
  int new_incr, last = regno + nregs;

  /* Process each hard register of the multi-register value
     individually; each contributes 1 to its pressure class.  */
  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
	{
	  cl = sched_regno_pressure_class[regno];
	  if (cl != NO_REGS)
	    {
	      if (clobber_p)
		{
		  new_incr = reg_pressure_info[cl].clobber_increase + 1;
		  reg_pressure_info[cl].clobber_increase = new_incr;
		}
	      else if (unused_p)
		{
		  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
		  reg_pressure_info[cl].unused_set_increase = new_incr;
		}
	      else
		{
		  new_incr = reg_pressure_info[cl].set_increase + 1;
		  reg_pressure_info[cl].set_increase = new_incr;
		  /* A set raises pressure only if the insn does not
		     also consume the register.  */
		  if (! insn_use_p (insn, regno))
		    reg_pressure_info[cl].change += 1;
		  create_insn_reg_set (regno, insn);
		}
	      /* The increase fields are bitfields of INCREASE_BITS
		 width.  */
	      gcc_assert (new_incr < (1 << INCREASE_BITS));
	    }
	}
      regno++;
    }
}
2082*38fd1498Szrj 
2083*38fd1498Szrj /* Update the register pressure info after birth of pseudo or hard
2084*38fd1498Szrj    register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say
2085*38fd1498Szrj    correspondingly that the register is in clobber or unused after the
2086*38fd1498Szrj    insn.  */
2087*38fd1498Szrj static void
mark_insn_reg_birth(rtx insn,rtx reg,bool clobber_p,bool unused_p)2088*38fd1498Szrj mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2089*38fd1498Szrj {
2090*38fd1498Szrj   int regno;
2091*38fd1498Szrj 
2092*38fd1498Szrj   if (GET_CODE (reg) == SUBREG)
2093*38fd1498Szrj     reg = SUBREG_REG (reg);
2094*38fd1498Szrj 
2095*38fd1498Szrj   if (! REG_P (reg))
2096*38fd1498Szrj     return;
2097*38fd1498Szrj 
2098*38fd1498Szrj   regno = REGNO (reg);
2099*38fd1498Szrj   if (regno < FIRST_PSEUDO_REGISTER)
2100*38fd1498Szrj     mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg),
2101*38fd1498Szrj 				clobber_p, unused_p);
2102*38fd1498Szrj   else
2103*38fd1498Szrj     mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2104*38fd1498Szrj }
2105*38fd1498Szrj 
2106*38fd1498Szrj /* Update the register pressure info after death of pseudo register
2107*38fd1498Szrj    REGNO.  */
2108*38fd1498Szrj static void
mark_pseudo_death(int regno)2109*38fd1498Szrj mark_pseudo_death (int regno)
2110*38fd1498Szrj {
2111*38fd1498Szrj   int incr;
2112*38fd1498Szrj   enum reg_class cl;
2113*38fd1498Szrj 
2114*38fd1498Szrj   gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2115*38fd1498Szrj   cl = sched_regno_pressure_class[regno];
2116*38fd1498Szrj   if (cl != NO_REGS)
2117*38fd1498Szrj     {
2118*38fd1498Szrj       incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2119*38fd1498Szrj       reg_pressure_info[cl].change -= incr;
2120*38fd1498Szrj     }
2121*38fd1498Szrj }
2122*38fd1498Szrj 
2123*38fd1498Szrj /* Like mark_pseudo_death except that NREGS saying how many hard
2124*38fd1498Szrj    registers involved in the death.  */
2125*38fd1498Szrj static void
mark_hard_regno_death(int regno,int nregs)2126*38fd1498Szrj mark_hard_regno_death (int regno, int nregs)
2127*38fd1498Szrj {
2128*38fd1498Szrj   enum reg_class cl;
2129*38fd1498Szrj   int last = regno + nregs;
2130*38fd1498Szrj 
2131*38fd1498Szrj   while (regno < last)
2132*38fd1498Szrj     {
2133*38fd1498Szrj       gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2134*38fd1498Szrj       if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2135*38fd1498Szrj 	{
2136*38fd1498Szrj 	  cl = sched_regno_pressure_class[regno];
2137*38fd1498Szrj 	  if (cl != NO_REGS)
2138*38fd1498Szrj 	    reg_pressure_info[cl].change -= 1;
2139*38fd1498Szrj 	}
2140*38fd1498Szrj       regno++;
2141*38fd1498Szrj     }
2142*38fd1498Szrj }
2143*38fd1498Szrj 
2144*38fd1498Szrj /* Update the register pressure info after death of pseudo or hard
2145*38fd1498Szrj    register REG.  */
2146*38fd1498Szrj static void
mark_reg_death(rtx reg)2147*38fd1498Szrj mark_reg_death (rtx reg)
2148*38fd1498Szrj {
2149*38fd1498Szrj   int regno;
2150*38fd1498Szrj 
2151*38fd1498Szrj   if (GET_CODE (reg) == SUBREG)
2152*38fd1498Szrj     reg = SUBREG_REG (reg);
2153*38fd1498Szrj 
2154*38fd1498Szrj   if (! REG_P (reg))
2155*38fd1498Szrj     return;
2156*38fd1498Szrj 
2157*38fd1498Szrj   regno = REGNO (reg);
2158*38fd1498Szrj   if (regno < FIRST_PSEUDO_REGISTER)
2159*38fd1498Szrj     mark_hard_regno_death (regno, REG_NREGS (reg));
2160*38fd1498Szrj   else
2161*38fd1498Szrj     mark_pseudo_death (regno);
2162*38fd1498Szrj }
2163*38fd1498Szrj 
2164*38fd1498Szrj /* Process SETTER of REG.  DATA is an insn containing the setter.  */
2165*38fd1498Szrj static void
mark_insn_reg_store(rtx reg,const_rtx setter,void * data)2166*38fd1498Szrj mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2167*38fd1498Szrj {
2168*38fd1498Szrj   if (setter != NULL_RTX && GET_CODE (setter) != SET)
2169*38fd1498Szrj     return;
2170*38fd1498Szrj   mark_insn_reg_birth
2171*38fd1498Szrj     ((rtx) data, reg, false,
2172*38fd1498Szrj      find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2173*38fd1498Szrj }
2174*38fd1498Szrj 
2175*38fd1498Szrj /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
2176*38fd1498Szrj static void
mark_insn_reg_clobber(rtx reg,const_rtx setter,void * data)2177*38fd1498Szrj mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2178*38fd1498Szrj {
2179*38fd1498Szrj   if (GET_CODE (setter) == CLOBBER)
2180*38fd1498Szrj     mark_insn_reg_birth ((rtx) data, reg, true, false);
2181*38fd1498Szrj }
2182*38fd1498Szrj 
2183*38fd1498Szrj /* Set up reg pressure info related to INSN.  */
2184*38fd1498Szrj void
init_insn_reg_pressure_info(rtx_insn * insn)2185*38fd1498Szrj init_insn_reg_pressure_info (rtx_insn *insn)
2186*38fd1498Szrj {
2187*38fd1498Szrj   int i, len;
2188*38fd1498Szrj   enum reg_class cl;
2189*38fd1498Szrj   static struct reg_pressure_data *pressure_info;
2190*38fd1498Szrj   rtx link;
2191*38fd1498Szrj 
2192*38fd1498Szrj   gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
2193*38fd1498Szrj 
2194*38fd1498Szrj   if (! INSN_P (insn))
2195*38fd1498Szrj     return;
2196*38fd1498Szrj 
2197*38fd1498Szrj   for (i = 0; i < ira_pressure_classes_num; i++)
2198*38fd1498Szrj     {
2199*38fd1498Szrj       cl = ira_pressure_classes[i];
2200*38fd1498Szrj       reg_pressure_info[cl].clobber_increase = 0;
2201*38fd1498Szrj       reg_pressure_info[cl].set_increase = 0;
2202*38fd1498Szrj       reg_pressure_info[cl].unused_set_increase = 0;
2203*38fd1498Szrj       reg_pressure_info[cl].change = 0;
2204*38fd1498Szrj     }
2205*38fd1498Szrj 
2206*38fd1498Szrj   note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2207*38fd1498Szrj 
2208*38fd1498Szrj   note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2209*38fd1498Szrj 
2210*38fd1498Szrj   if (AUTO_INC_DEC)
2211*38fd1498Szrj     for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2212*38fd1498Szrj       if (REG_NOTE_KIND (link) == REG_INC)
2213*38fd1498Szrj 	mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2214*38fd1498Szrj 
2215*38fd1498Szrj   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2216*38fd1498Szrj     if (REG_NOTE_KIND (link) == REG_DEAD)
2217*38fd1498Szrj       mark_reg_death (XEXP (link, 0));
2218*38fd1498Szrj 
2219*38fd1498Szrj   len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
2220*38fd1498Szrj   pressure_info
2221*38fd1498Szrj     = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2222*38fd1498Szrj   if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2223*38fd1498Szrj     INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2224*38fd1498Szrj 						    * sizeof (int), 1);
2225*38fd1498Szrj   for (i = 0; i < ira_pressure_classes_num; i++)
2226*38fd1498Szrj     {
2227*38fd1498Szrj       cl = ira_pressure_classes[i];
2228*38fd1498Szrj       pressure_info[i].clobber_increase
2229*38fd1498Szrj 	= reg_pressure_info[cl].clobber_increase;
2230*38fd1498Szrj       pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2231*38fd1498Szrj       pressure_info[i].unused_set_increase
2232*38fd1498Szrj 	= reg_pressure_info[cl].unused_set_increase;
2233*38fd1498Szrj       pressure_info[i].change = reg_pressure_info[cl].change;
2234*38fd1498Szrj     }
2235*38fd1498Szrj }
2236*38fd1498Szrj 
2237*38fd1498Szrj 
2238*38fd1498Szrj 
2239*38fd1498Szrj 
2240*38fd1498Szrj /* Internal variable for sched_analyze_[12] () functions.
2241*38fd1498Szrj    If it is nonzero, this means that sched_analyze_[12] looks
2242*38fd1498Szrj    at the most toplevel SET.  */
2243*38fd1498Szrj static bool can_start_lhs_rhs_p;
2244*38fd1498Szrj 
/* Extend reg info for the deps context DEPS given that
   we have just generated a register numbered REGNO.  */
static void
extend_deps_reg_info (struct deps_desc *deps, int regno)
{
  int max_regno = regno + 1;

  gcc_assert (!reload_completed);

  /* In a readonly context, it would not hurt to extend info,
     but it should not be needed.
     NOTE(review): given the assert above, this branch can only be
     reached in builds where gcc_assert does not evaluate its
     argument -- confirm whether that is the intent.  */
  if (reload_completed && deps->readonly)
    {
      deps->max_reg = max_regno;
      return;
    }

  if (max_regno > deps->max_reg)
    {
      /* Grow the per-register dependence array and zero-fill the
	 newly added entries.  */
      deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
                                   max_regno);
      memset (&deps->reg_last[deps->max_reg],
              0, (max_regno - deps->max_reg)
              * sizeof (struct deps_reg));
      deps->max_reg = max_regno;
    }
}
2272*38fd1498Szrj 
2273*38fd1498Szrj /* Extends REG_INFO_P if needed.  */
2274*38fd1498Szrj void
maybe_extend_reg_info_p(void)2275*38fd1498Szrj maybe_extend_reg_info_p (void)
2276*38fd1498Szrj {
2277*38fd1498Szrj   /* Extend REG_INFO_P, if needed.  */
2278*38fd1498Szrj   if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2279*38fd1498Szrj     {
2280*38fd1498Szrj       size_t new_reg_info_p_size = max_regno + 128;
2281*38fd1498Szrj 
2282*38fd1498Szrj       gcc_assert (!reload_completed && sel_sched_p ());
2283*38fd1498Szrj 
2284*38fd1498Szrj       reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2285*38fd1498Szrj                                                     new_reg_info_p_size,
2286*38fd1498Szrj                                                     reg_info_p_size,
2287*38fd1498Szrj                                                     sizeof (*reg_info_p));
2288*38fd1498Szrj       reg_info_p_size = new_reg_info_p_size;
2289*38fd1498Szrj     }
2290*38fd1498Szrj }
2291*38fd1498Szrj 
/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
   The type of the reference is specified by REF and can be SET,
   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */

static void
sched_analyze_reg (struct deps_desc *deps, int regno, machine_mode mode,
		   enum rtx_code ref, rtx_insn *insn)
{
  /* We could emit new pseudos in renaming.  Extend the reg structures.  */
  if (!reload_completed && sel_sched_p ()
      && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
    extend_deps_reg_info (deps, regno);

  maybe_extend_reg_info_p ();

  /* A hard reg in a wide mode may really be multiple registers.
     If so, mark all of them just like the first.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      int i = hard_regno_nregs (regno, mode);
      if (ref == SET)
	{
	  while (--i >= 0)
	    note_reg_set (regno + i);
	}
      else if (ref == USE)
	{
	  while (--i >= 0)
	    note_reg_use (regno + i);
	}
      else
	{
	  /* All other reference kinds (CLOBBER and the PRE_/POST_
	     increments and decrements) are recorded as clobbers.  */
	  while (--i >= 0)
	    note_reg_clobber (regno + i);
	}
    }

  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
     it does not reload.  Ignore these as they have served their
     purpose already.  */
  else if (regno >= deps->max_reg)
    {
      enum rtx_code code = GET_CODE (PATTERN (insn));
      gcc_assert (code == USE || code == CLOBBER);
    }

  else
    {
      /* An ordinary pseudo register.  */
      if (ref == SET)
	note_reg_set (regno);
      else if (ref == USE)
	note_reg_use (regno);
      else
	note_reg_clobber (regno);

      /* Pseudos that are REG_EQUIV to something may be replaced
	 by that during reloading.  We need only add dependencies for
	the address in the REG_EQUIV note.  */
      if (!reload_completed && get_reg_known_equiv_p (regno))
	{
	  rtx t = get_reg_known_value (regno);
	  if (MEM_P (t))
	    sched_analyze_2 (deps, XEXP (t, 0), insn);
	}

      /* Don't let it cross a call after scheduling if it doesn't
	 already cross one.  */
      if (REG_N_CALLS_CROSSED (regno) == 0)
	{
	  if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
	    deps->sched_before_next_call
	      = alloc_INSN_LIST (insn, deps->sched_before_next_call);
	  else
	    add_dependence_list (insn, deps->last_function_call, 1,
				 REG_DEP_ANTI, false);
	}
    }
}
2370*38fd1498Szrj 
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
{
  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);
  /* Save and clear the toplevel-SET flag so the recursive calls below
     do not treat nested expressions as the toplevel LHS/RHS.  */
  bool cslr_p = can_start_lhs_rhs_p;

  can_start_lhs_rhs_p = false;

  gcc_assert (dest);
  /* NOTE(review): reachable only in builds where gcc_assert does not
     evaluate its argument; then a null destination is silently
     ignored.  */
  if (dest == 0)
    return;

  if (cslr_p && sched_deps_info->start_lhs)
    sched_deps_info->start_lhs (dest);

  /* A PARALLEL destination groups several destinations: analyze each
     as a clobber, then (for a SET) the source once.  */
  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
	  sched_analyze_1 (deps,
			   gen_rtx_CLOBBER (VOIDmode,
					    XEXP (XVECEXP (dest, 0, i), 0)),
			   insn);

      if (cslr_p && sched_deps_info->finish_lhs)
	sched_deps_info->finish_lhs ();

      if (code == SET)
	{
	  can_start_lhs_rhs_p = cslr_p;

	  sched_analyze_2 (deps, SET_SRC (x), insn);

	  can_start_lhs_rhs_p = false;
	}

      return;
    }

  /* Strip wrappers to reach the real destination.  */
  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT)
    {
      if (GET_CODE (dest) == STRICT_LOW_PART
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || read_modify_subreg_p (dest))
        {
	  /* These both read and modify the result.  We must handle
             them as writes to get proper dependencies for following
             instructions.  We must handle them as reads to get proper
             dependencies from this to previous instructions.
             Thus we need to call sched_analyze_2.  */

	  sched_analyze_2 (deps, XEXP (dest, 0), insn);
	}
      if (GET_CODE (dest) == ZERO_EXTRACT)
	{
	  /* The second and third arguments are values read by this insn.  */
	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
	}
      dest = XEXP (dest, 0);
    }

  if (REG_P (dest))
    {
      int regno = REGNO (dest);
      machine_mode mode = GET_MODE (dest);

      sched_analyze_reg (deps, regno, mode, code, insn);

#ifdef STACK_REGS
      /* Treat all writes to a stack register as modifying the TOS.  */
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
	{
	  /* Avoid analyzing the same register twice.  */
	  if (regno != FIRST_STACK_REG)
	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);

	  add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
			       FIRST_STACK_REG);
	}
#endif
    }
  else if (MEM_P (dest))
    {
      /* Writing memory.  */
      rtx t = dest;

      if (sched_deps_info->use_cselib)
	{
	  /* Canonicalize the address through cselib values for better
	     alias analysis; work on a shallow copy so the insn itself
	     is left untouched.  */
	  machine_mode address_mode = get_address_mode (dest);

	  t = shallow_copy_rtx (dest);
	  cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
				   GET_MODE (t), insn);
	  XEXP (t, 0)
	    = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
						insn);
	}
      t = canon_rtx (t);

      /* Pending lists can't get larger with a readonly context.  */
      if (!deps->readonly
          && ((deps->pending_read_list_length + deps->pending_write_list_length)
              >= MAX_PENDING_LIST_LENGTH))
	{
	  /* Flush all pending reads and writes to prevent the pending lists
	     from getting any larger.  Insn scheduling runs too slowly when
	     these lists get long.  When compiling GCC with itself,
	     this flush occurs 8 times for sparc, and 10 times for m88k using
	     the default value of 32.  */
	  flush_pending_lists (deps, insn, false, true);
	}
      else
	{
	  rtx_insn_list *pending;
	  rtx_expr_list *pending_mem;

	  /* Anti dependencies against earlier pending reads of
	     overlapping memory.  */
	  pending = deps->pending_read_insns;
	  pending_mem = deps->pending_read_mems;
	  while (pending)
	    {
	      if (anti_dependence (pending_mem->element (), t)
		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
		note_mem_dep (t, pending_mem->element (), pending->insn (),
			      DEP_ANTI);

	      pending = pending->next ();
	      pending_mem = pending_mem->next ();
	    }

	  /* Output dependencies against earlier pending writes of
	     overlapping memory.  */
	  pending = deps->pending_write_insns;
	  pending_mem = deps->pending_write_mems;
	  while (pending)
	    {
	      if (output_dependence (pending_mem->element (), t)
		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
		note_mem_dep (t, pending_mem->element (),
			      pending->insn (),
			      DEP_OUTPUT);

	      pending = pending->next ();
	      pending_mem = pending_mem-> next ();
	    }

	  /* Keep the store ordered with respect to memory flushes and
	     pending jump insns.  */
	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
			       REG_DEP_ANTI, true);
	  add_dependence_list (insn, deps->pending_jump_insns, 1,
			       REG_DEP_CONTROL, true);

          if (!deps->readonly)
            add_insn_mem_dependence (deps, false, insn, dest);
	}
      /* The address itself is read by the store.  */
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  if (cslr_p && sched_deps_info->finish_lhs)
    sched_deps_info->finish_lhs ();

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    {
      can_start_lhs_rhs_p = cslr_p;

      sched_analyze_2 (deps, SET_SRC (x), insn);

      can_start_lhs_rhs_p = false;
    }
}
2547*38fd1498Szrj 
2548*38fd1498Szrj /* Analyze the uses of memory and registers in rtx X in INSN.  */
2549*38fd1498Szrj static void
sched_analyze_2(struct deps_desc * deps,rtx x,rtx_insn * insn)2550*38fd1498Szrj sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2551*38fd1498Szrj {
2552*38fd1498Szrj   int i;
2553*38fd1498Szrj   int j;
2554*38fd1498Szrj   enum rtx_code code;
2555*38fd1498Szrj   const char *fmt;
2556*38fd1498Szrj   bool cslr_p = can_start_lhs_rhs_p;
2557*38fd1498Szrj 
2558*38fd1498Szrj   can_start_lhs_rhs_p = false;
2559*38fd1498Szrj 
2560*38fd1498Szrj   gcc_assert (x);
2561*38fd1498Szrj   if (x == 0)
2562*38fd1498Szrj     return;
2563*38fd1498Szrj 
2564*38fd1498Szrj   if (cslr_p && sched_deps_info->start_rhs)
2565*38fd1498Szrj     sched_deps_info->start_rhs (x);
2566*38fd1498Szrj 
2567*38fd1498Szrj   code = GET_CODE (x);
2568*38fd1498Szrj 
2569*38fd1498Szrj   switch (code)
2570*38fd1498Szrj     {
2571*38fd1498Szrj     CASE_CONST_ANY:
2572*38fd1498Szrj     case SYMBOL_REF:
2573*38fd1498Szrj     case CONST:
2574*38fd1498Szrj     case LABEL_REF:
2575*38fd1498Szrj       /* Ignore constants.  */
2576*38fd1498Szrj       if (cslr_p && sched_deps_info->finish_rhs)
2577*38fd1498Szrj 	sched_deps_info->finish_rhs ();
2578*38fd1498Szrj 
2579*38fd1498Szrj       return;
2580*38fd1498Szrj 
2581*38fd1498Szrj     case CC0:
2582*38fd1498Szrj       if (!HAVE_cc0)
2583*38fd1498Szrj 	gcc_unreachable ();
2584*38fd1498Szrj 
2585*38fd1498Szrj       /* User of CC0 depends on immediately preceding insn.  */
2586*38fd1498Szrj       SCHED_GROUP_P (insn) = 1;
2587*38fd1498Szrj        /* Don't move CC0 setter to another block (it can set up the
2588*38fd1498Szrj         same flag for previous CC0 users which is safe).  */
2589*38fd1498Szrj       CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2590*38fd1498Szrj 
2591*38fd1498Szrj       if (cslr_p && sched_deps_info->finish_rhs)
2592*38fd1498Szrj 	sched_deps_info->finish_rhs ();
2593*38fd1498Szrj 
2594*38fd1498Szrj       return;
2595*38fd1498Szrj 
2596*38fd1498Szrj     case REG:
2597*38fd1498Szrj       {
2598*38fd1498Szrj 	int regno = REGNO (x);
2599*38fd1498Szrj 	machine_mode mode = GET_MODE (x);
2600*38fd1498Szrj 
2601*38fd1498Szrj 	sched_analyze_reg (deps, regno, mode, USE, insn);
2602*38fd1498Szrj 
2603*38fd1498Szrj #ifdef STACK_REGS
2604*38fd1498Szrj       /* Treat all reads of a stack register as modifying the TOS.  */
2605*38fd1498Szrj       if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2606*38fd1498Szrj 	{
2607*38fd1498Szrj 	  /* Avoid analyzing the same register twice.  */
2608*38fd1498Szrj 	  if (regno != FIRST_STACK_REG)
2609*38fd1498Szrj 	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2610*38fd1498Szrj 	  sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2611*38fd1498Szrj 	}
2612*38fd1498Szrj #endif
2613*38fd1498Szrj 
2614*38fd1498Szrj 	if (cslr_p && sched_deps_info->finish_rhs)
2615*38fd1498Szrj 	  sched_deps_info->finish_rhs ();
2616*38fd1498Szrj 
2617*38fd1498Szrj 	return;
2618*38fd1498Szrj       }
2619*38fd1498Szrj 
2620*38fd1498Szrj     case MEM:
2621*38fd1498Szrj       {
2622*38fd1498Szrj 	/* Reading memory.  */
2623*38fd1498Szrj 	rtx_insn_list *u;
2624*38fd1498Szrj 	rtx_insn_list *pending;
2625*38fd1498Szrj 	rtx_expr_list *pending_mem;
2626*38fd1498Szrj 	rtx t = x;
2627*38fd1498Szrj 
2628*38fd1498Szrj 	if (sched_deps_info->use_cselib)
2629*38fd1498Szrj 	  {
2630*38fd1498Szrj 	    machine_mode address_mode = get_address_mode (t);
2631*38fd1498Szrj 
2632*38fd1498Szrj 	    t = shallow_copy_rtx (t);
2633*38fd1498Szrj 	    cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2634*38fd1498Szrj 				     GET_MODE (t), insn);
2635*38fd1498Szrj 	    XEXP (t, 0)
2636*38fd1498Szrj 	      = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2637*38fd1498Szrj 						  insn);
2638*38fd1498Szrj 	  }
2639*38fd1498Szrj 
2640*38fd1498Szrj 	if (!DEBUG_INSN_P (insn))
2641*38fd1498Szrj 	  {
2642*38fd1498Szrj 	    t = canon_rtx (t);
2643*38fd1498Szrj 	    pending = deps->pending_read_insns;
2644*38fd1498Szrj 	    pending_mem = deps->pending_read_mems;
2645*38fd1498Szrj 	    while (pending)
2646*38fd1498Szrj 	      {
2647*38fd1498Szrj 		if (read_dependence (pending_mem->element (), t)
2648*38fd1498Szrj 		    && ! sched_insns_conditions_mutex_p (insn,
2649*38fd1498Szrj 							 pending->insn ()))
2650*38fd1498Szrj 		  note_mem_dep (t, pending_mem->element (),
2651*38fd1498Szrj 				pending->insn (),
2652*38fd1498Szrj 				DEP_ANTI);
2653*38fd1498Szrj 
2654*38fd1498Szrj 		pending = pending->next ();
2655*38fd1498Szrj 		pending_mem = pending_mem->next ();
2656*38fd1498Szrj 	      }
2657*38fd1498Szrj 
2658*38fd1498Szrj 	    pending = deps->pending_write_insns;
2659*38fd1498Szrj 	    pending_mem = deps->pending_write_mems;
2660*38fd1498Szrj 	    while (pending)
2661*38fd1498Szrj 	      {
2662*38fd1498Szrj 		if (true_dependence (pending_mem->element (), VOIDmode, t)
2663*38fd1498Szrj 		    && ! sched_insns_conditions_mutex_p (insn,
2664*38fd1498Szrj 							 pending->insn ()))
2665*38fd1498Szrj 		  note_mem_dep (t, pending_mem->element (),
2666*38fd1498Szrj 				pending->insn (),
2667*38fd1498Szrj 				sched_deps_info->generate_spec_deps
2668*38fd1498Szrj 				? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2669*38fd1498Szrj 
2670*38fd1498Szrj 		pending = pending->next ();
2671*38fd1498Szrj 		pending_mem = pending_mem->next ();
2672*38fd1498Szrj 	      }
2673*38fd1498Szrj 
2674*38fd1498Szrj 	    for (u = deps->last_pending_memory_flush; u; u = u->next ())
2675*38fd1498Szrj 	      add_dependence (insn, u->insn (), REG_DEP_ANTI);
2676*38fd1498Szrj 
2677*38fd1498Szrj 	    for (u = deps->pending_jump_insns; u; u = u->next ())
2678*38fd1498Szrj 	      if (deps_may_trap_p (x))
2679*38fd1498Szrj 		{
2680*38fd1498Szrj 		  if ((sched_deps_info->generate_spec_deps)
2681*38fd1498Szrj 		      && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2682*38fd1498Szrj 		    {
2683*38fd1498Szrj 		      ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2684*38fd1498Szrj 					      MAX_DEP_WEAK);
2685*38fd1498Szrj 
2686*38fd1498Szrj 		      note_dep (u->insn (), ds);
2687*38fd1498Szrj 		    }
2688*38fd1498Szrj 		  else
2689*38fd1498Szrj 		    add_dependence (insn, u->insn (), REG_DEP_CONTROL);
2690*38fd1498Szrj 		}
2691*38fd1498Szrj 	  }
2692*38fd1498Szrj 
2693*38fd1498Szrj 	/* Always add these dependencies to pending_reads, since
2694*38fd1498Szrj 	   this insn may be followed by a write.  */
2695*38fd1498Szrj 	if (!deps->readonly)
2696*38fd1498Szrj 	  {
2697*38fd1498Szrj 	    if ((deps->pending_read_list_length
2698*38fd1498Szrj 		 + deps->pending_write_list_length)
2699*38fd1498Szrj 		>= MAX_PENDING_LIST_LENGTH
2700*38fd1498Szrj 		&& !DEBUG_INSN_P (insn))
2701*38fd1498Szrj 	      flush_pending_lists (deps, insn, true, true);
2702*38fd1498Szrj 	    add_insn_mem_dependence (deps, true, insn, x);
2703*38fd1498Szrj 	  }
2704*38fd1498Szrj 
2705*38fd1498Szrj 	sched_analyze_2 (deps, XEXP (x, 0), insn);
2706*38fd1498Szrj 
2707*38fd1498Szrj 	if (cslr_p && sched_deps_info->finish_rhs)
2708*38fd1498Szrj 	  sched_deps_info->finish_rhs ();
2709*38fd1498Szrj 
2710*38fd1498Szrj 	return;
2711*38fd1498Szrj       }
2712*38fd1498Szrj 
2713*38fd1498Szrj     /* Force pending stores to memory in case a trap handler needs them.
2714*38fd1498Szrj        Also force pending loads from memory; loads and stores can segfault
2715*38fd1498Szrj        and the signal handler won't be triggered if the trap insn was moved
2716*38fd1498Szrj        above load or store insn.  */
2717*38fd1498Szrj     case TRAP_IF:
2718*38fd1498Szrj       flush_pending_lists (deps, insn, true, true);
2719*38fd1498Szrj       break;
2720*38fd1498Szrj 
2721*38fd1498Szrj     case PREFETCH:
2722*38fd1498Szrj       if (PREFETCH_SCHEDULE_BARRIER_P (x))
2723*38fd1498Szrj 	reg_pending_barrier = TRUE_BARRIER;
2724*38fd1498Szrj       /* Prefetch insn contains addresses only.  So if the prefetch
2725*38fd1498Szrj 	 address has no registers, there will be no dependencies on
2726*38fd1498Szrj 	 the prefetch insn.  This is wrong with result code
2727*38fd1498Szrj 	 correctness point of view as such prefetch can be moved below
2728*38fd1498Szrj 	 a jump insn which usually generates MOVE_BARRIER preventing
2729*38fd1498Szrj 	 to move insns containing registers or memories through the
2730*38fd1498Szrj 	 barrier.  It is also wrong with generated code performance
2731*38fd1498Szrj 	 point of view as prefetch withouth dependecies will have a
2732*38fd1498Szrj 	 tendency to be issued later instead of earlier.  It is hard
2733*38fd1498Szrj 	 to generate accurate dependencies for prefetch insns as
2734*38fd1498Szrj 	 prefetch has only the start address but it is better to have
2735*38fd1498Szrj 	 something than nothing.  */
2736*38fd1498Szrj       if (!deps->readonly)
2737*38fd1498Szrj 	{
2738*38fd1498Szrj 	  rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2739*38fd1498Szrj 	  if (sched_deps_info->use_cselib)
2740*38fd1498Szrj 	    cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2741*38fd1498Szrj 	  add_insn_mem_dependence (deps, true, insn, x);
2742*38fd1498Szrj 	}
2743*38fd1498Szrj       break;
2744*38fd1498Szrj 
2745*38fd1498Szrj     case UNSPEC_VOLATILE:
2746*38fd1498Szrj       flush_pending_lists (deps, insn, true, true);
2747*38fd1498Szrj       /* FALLTHRU */
2748*38fd1498Szrj 
2749*38fd1498Szrj     case ASM_OPERANDS:
2750*38fd1498Szrj     case ASM_INPUT:
2751*38fd1498Szrj       {
2752*38fd1498Szrj 	/* Traditional and volatile asm instructions must be considered to use
2753*38fd1498Szrj 	   and clobber all hard registers, all pseudo-registers and all of
2754*38fd1498Szrj 	   memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.
2755*38fd1498Szrj 
2756*38fd1498Szrj 	   Consider for instance a volatile asm that changes the fpu rounding
2757*38fd1498Szrj 	   mode.  An insn should not be moved across this even if it only uses
2758*38fd1498Szrj 	   pseudo-regs because it might give an incorrectly rounded result.  */
2759*38fd1498Szrj 	if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2760*38fd1498Szrj 	    && !DEBUG_INSN_P (insn))
2761*38fd1498Szrj 	  reg_pending_barrier = TRUE_BARRIER;
2762*38fd1498Szrj 
2763*38fd1498Szrj 	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
2764*38fd1498Szrj 	   We can not just fall through here since then we would be confused
2765*38fd1498Szrj 	   by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
2766*38fd1498Szrj 	   traditional asms unlike their normal usage.  */
2767*38fd1498Szrj 
2768*38fd1498Szrj 	if (code == ASM_OPERANDS)
2769*38fd1498Szrj 	  {
2770*38fd1498Szrj 	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2771*38fd1498Szrj 	      sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2772*38fd1498Szrj 
2773*38fd1498Szrj 	    if (cslr_p && sched_deps_info->finish_rhs)
2774*38fd1498Szrj 	      sched_deps_info->finish_rhs ();
2775*38fd1498Szrj 
2776*38fd1498Szrj 	    return;
2777*38fd1498Szrj 	  }
2778*38fd1498Szrj 	break;
2779*38fd1498Szrj       }
2780*38fd1498Szrj 
2781*38fd1498Szrj     case PRE_DEC:
2782*38fd1498Szrj     case POST_DEC:
2783*38fd1498Szrj     case PRE_INC:
2784*38fd1498Szrj     case POST_INC:
2785*38fd1498Szrj       /* These both read and modify the result.  We must handle them as writes
2786*38fd1498Szrj          to get proper dependencies for following instructions.  We must handle
2787*38fd1498Szrj          them as reads to get proper dependencies from this to previous
2788*38fd1498Szrj          instructions.  Thus we need to pass them to both sched_analyze_1
2789*38fd1498Szrj          and sched_analyze_2.  We must call sched_analyze_2 first in order
2790*38fd1498Szrj          to get the proper antecedent for the read.  */
2791*38fd1498Szrj       sched_analyze_2 (deps, XEXP (x, 0), insn);
2792*38fd1498Szrj       sched_analyze_1 (deps, x, insn);
2793*38fd1498Szrj 
2794*38fd1498Szrj       if (cslr_p && sched_deps_info->finish_rhs)
2795*38fd1498Szrj 	sched_deps_info->finish_rhs ();
2796*38fd1498Szrj 
2797*38fd1498Szrj       return;
2798*38fd1498Szrj 
2799*38fd1498Szrj     case POST_MODIFY:
2800*38fd1498Szrj     case PRE_MODIFY:
2801*38fd1498Szrj       /* op0 = op0 + op1 */
2802*38fd1498Szrj       sched_analyze_2 (deps, XEXP (x, 0), insn);
2803*38fd1498Szrj       sched_analyze_2 (deps, XEXP (x, 1), insn);
2804*38fd1498Szrj       sched_analyze_1 (deps, x, insn);
2805*38fd1498Szrj 
2806*38fd1498Szrj       if (cslr_p && sched_deps_info->finish_rhs)
2807*38fd1498Szrj 	sched_deps_info->finish_rhs ();
2808*38fd1498Szrj 
2809*38fd1498Szrj       return;
2810*38fd1498Szrj 
2811*38fd1498Szrj     default:
2812*38fd1498Szrj       break;
2813*38fd1498Szrj     }
2814*38fd1498Szrj 
2815*38fd1498Szrj   /* Other cases: walk the insn.  */
2816*38fd1498Szrj   fmt = GET_RTX_FORMAT (code);
2817*38fd1498Szrj   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2818*38fd1498Szrj     {
2819*38fd1498Szrj       if (fmt[i] == 'e')
2820*38fd1498Szrj 	sched_analyze_2 (deps, XEXP (x, i), insn);
2821*38fd1498Szrj       else if (fmt[i] == 'E')
2822*38fd1498Szrj 	for (j = 0; j < XVECLEN (x, i); j++)
2823*38fd1498Szrj 	  sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2824*38fd1498Szrj     }
2825*38fd1498Szrj 
2826*38fd1498Szrj   if (cslr_p && sched_deps_info->finish_rhs)
2827*38fd1498Szrj     sched_deps_info->finish_rhs ();
2828*38fd1498Szrj }
2829*38fd1498Szrj 
2830*38fd1498Szrj /* Try to group two fusible insns together to prevent scheduler
2831*38fd1498Szrj    from scheduling them apart.  */
2832*38fd1498Szrj 
2833*38fd1498Szrj static void
sched_macro_fuse_insns(rtx_insn * insn)2834*38fd1498Szrj sched_macro_fuse_insns (rtx_insn *insn)
2835*38fd1498Szrj {
2836*38fd1498Szrj   rtx_insn *prev;
2837*38fd1498Szrj   /* No target hook would return true for debug insn as any of the
2838*38fd1498Szrj      hook operand, and with very large sequences of only debug insns
2839*38fd1498Szrj      where on each we call sched_macro_fuse_insns it has quadratic
2840*38fd1498Szrj      compile time complexity.  */
2841*38fd1498Szrj   if (DEBUG_INSN_P (insn))
2842*38fd1498Szrj     return;
2843*38fd1498Szrj   prev = prev_nonnote_nondebug_insn (insn);
2844*38fd1498Szrj   if (!prev)
2845*38fd1498Szrj     return;
2846*38fd1498Szrj 
2847*38fd1498Szrj   if (any_condjump_p (insn))
2848*38fd1498Szrj     {
2849*38fd1498Szrj       unsigned int condreg1, condreg2;
2850*38fd1498Szrj       rtx cc_reg_1;
2851*38fd1498Szrj       targetm.fixed_condition_code_regs (&condreg1, &condreg2);
2852*38fd1498Szrj       cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
2853*38fd1498Szrj       if (reg_referenced_p (cc_reg_1, PATTERN (insn))
2854*38fd1498Szrj 	  && modified_in_p (cc_reg_1, prev))
2855*38fd1498Szrj 	{
2856*38fd1498Szrj 	  if (targetm.sched.macro_fusion_pair_p (prev, insn))
2857*38fd1498Szrj 	    SCHED_GROUP_P (insn) = 1;
2858*38fd1498Szrj 	  return;
2859*38fd1498Szrj 	}
2860*38fd1498Szrj     }
2861*38fd1498Szrj 
2862*38fd1498Szrj   if (single_set (insn) && single_set (prev))
2863*38fd1498Szrj     {
2864*38fd1498Szrj       if (targetm.sched.macro_fusion_pair_p (prev, insn))
2865*38fd1498Szrj 	SCHED_GROUP_P (insn) = 1;
2866*38fd1498Szrj     }
2867*38fd1498Szrj }
2868*38fd1498Szrj 
/* Get the implicit reg pending clobbers for INSN and save them in TEMP.  */
void
get_implicit_reg_pending_clobbers (HARD_REG_SET *temp, rtx_insn *insn)
{
  /* Set up the global recognition state for INSN; the calls below
     consume that state, so this ordering must be preserved.  */
  extract_insn (insn);
  preprocess_constraints (insn);
  alternative_mask preferred = get_preferred_alternatives (insn);
  /* Collect the hard registers implicitly set by INSN under its
     preferred alternatives...  */
  ira_implicitly_set_insn_hard_regs (temp, preferred);
  /* ...but drop registers IRA never allocates, since they cannot
     matter for register allocation.  */
  AND_COMPL_HARD_REG_SET (*temp, ira_no_alloc_regs);
}
2879*38fd1498Szrj 
2880*38fd1498Szrj /* Analyze an INSN with pattern X to find all dependencies.  */
2881*38fd1498Szrj static void
sched_analyze_insn(struct deps_desc * deps,rtx x,rtx_insn * insn)2882*38fd1498Szrj sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
2883*38fd1498Szrj {
2884*38fd1498Szrj   RTX_CODE code = GET_CODE (x);
2885*38fd1498Szrj   rtx link;
2886*38fd1498Szrj   unsigned i;
2887*38fd1498Szrj   reg_set_iterator rsi;
2888*38fd1498Szrj 
2889*38fd1498Szrj   if (! reload_completed)
2890*38fd1498Szrj     {
2891*38fd1498Szrj       HARD_REG_SET temp;
2892*38fd1498Szrj       get_implicit_reg_pending_clobbers (&temp, insn);
2893*38fd1498Szrj       IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2894*38fd1498Szrj     }
2895*38fd1498Szrj 
2896*38fd1498Szrj   can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2897*38fd1498Szrj 			 && code == SET);
2898*38fd1498Szrj 
2899*38fd1498Szrj   /* Group compare and branch insns for macro-fusion.  */
2900*38fd1498Szrj   if (!deps->readonly
2901*38fd1498Szrj       && targetm.sched.macro_fusion_p
2902*38fd1498Szrj       && targetm.sched.macro_fusion_p ())
2903*38fd1498Szrj     sched_macro_fuse_insns (insn);
2904*38fd1498Szrj 
2905*38fd1498Szrj   if (may_trap_p (x))
2906*38fd1498Szrj     /* Avoid moving trapping instructions across function calls that might
2907*38fd1498Szrj        not always return.  */
2908*38fd1498Szrj     add_dependence_list (insn, deps->last_function_call_may_noreturn,
2909*38fd1498Szrj 			 1, REG_DEP_ANTI, true);
2910*38fd1498Szrj 
2911*38fd1498Szrj   /* We must avoid creating a situation in which two successors of the
2912*38fd1498Szrj      current block have different unwind info after scheduling.  If at any
2913*38fd1498Szrj      point the two paths re-join this leads to incorrect unwind info.  */
2914*38fd1498Szrj   /* ??? There are certain situations involving a forced frame pointer in
2915*38fd1498Szrj      which, with extra effort, we could fix up the unwind info at a later
2916*38fd1498Szrj      CFG join.  However, it seems better to notice these cases earlier
2917*38fd1498Szrj      during prologue generation and avoid marking the frame pointer setup
2918*38fd1498Szrj      as frame-related at all.  */
2919*38fd1498Szrj   if (RTX_FRAME_RELATED_P (insn))
2920*38fd1498Szrj     {
2921*38fd1498Szrj       /* Make sure prologue insn is scheduled before next jump.  */
2922*38fd1498Szrj       deps->sched_before_next_jump
2923*38fd1498Szrj 	= alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2924*38fd1498Szrj 
2925*38fd1498Szrj       /* Make sure epilogue insn is scheduled after preceding jumps.  */
2926*38fd1498Szrj       add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2927*38fd1498Szrj 			   REG_DEP_ANTI, true);
2928*38fd1498Szrj       add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2929*38fd1498Szrj 			   true);
2930*38fd1498Szrj     }
2931*38fd1498Szrj 
2932*38fd1498Szrj   if (code == COND_EXEC)
2933*38fd1498Szrj     {
2934*38fd1498Szrj       sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2935*38fd1498Szrj 
2936*38fd1498Szrj       /* ??? Should be recording conditions so we reduce the number of
2937*38fd1498Szrj 	 false dependencies.  */
2938*38fd1498Szrj       x = COND_EXEC_CODE (x);
2939*38fd1498Szrj       code = GET_CODE (x);
2940*38fd1498Szrj     }
2941*38fd1498Szrj   if (code == SET || code == CLOBBER)
2942*38fd1498Szrj     {
2943*38fd1498Szrj       sched_analyze_1 (deps, x, insn);
2944*38fd1498Szrj 
2945*38fd1498Szrj       /* Bare clobber insns are used for letting life analysis, reg-stack
2946*38fd1498Szrj 	 and others know that a value is dead.  Depend on the last call
2947*38fd1498Szrj 	 instruction so that reg-stack won't get confused.  */
2948*38fd1498Szrj       if (code == CLOBBER)
2949*38fd1498Szrj 	add_dependence_list (insn, deps->last_function_call, 1,
2950*38fd1498Szrj 			     REG_DEP_OUTPUT, true);
2951*38fd1498Szrj     }
2952*38fd1498Szrj   else if (code == PARALLEL)
2953*38fd1498Szrj     {
2954*38fd1498Szrj       for (i = XVECLEN (x, 0); i--;)
2955*38fd1498Szrj 	{
2956*38fd1498Szrj 	  rtx sub = XVECEXP (x, 0, i);
2957*38fd1498Szrj 	  code = GET_CODE (sub);
2958*38fd1498Szrj 
2959*38fd1498Szrj 	  if (code == COND_EXEC)
2960*38fd1498Szrj 	    {
2961*38fd1498Szrj 	      sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2962*38fd1498Szrj 	      sub = COND_EXEC_CODE (sub);
2963*38fd1498Szrj 	      code = GET_CODE (sub);
2964*38fd1498Szrj 	    }
2965*38fd1498Szrj 	  if (code == SET || code == CLOBBER)
2966*38fd1498Szrj 	    sched_analyze_1 (deps, sub, insn);
2967*38fd1498Szrj 	  else
2968*38fd1498Szrj 	    sched_analyze_2 (deps, sub, insn);
2969*38fd1498Szrj 	}
2970*38fd1498Szrj     }
2971*38fd1498Szrj   else
2972*38fd1498Szrj     sched_analyze_2 (deps, x, insn);
2973*38fd1498Szrj 
2974*38fd1498Szrj   /* Mark registers CLOBBERED or used by called function.  */
2975*38fd1498Szrj   if (CALL_P (insn))
2976*38fd1498Szrj     {
2977*38fd1498Szrj       for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2978*38fd1498Szrj 	{
2979*38fd1498Szrj 	  if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2980*38fd1498Szrj 	    sched_analyze_1 (deps, XEXP (link, 0), insn);
2981*38fd1498Szrj 	  else if (GET_CODE (XEXP (link, 0)) != SET)
2982*38fd1498Szrj 	    sched_analyze_2 (deps, XEXP (link, 0), insn);
2983*38fd1498Szrj 	}
2984*38fd1498Szrj       /* Don't schedule anything after a tail call, tail call needs
2985*38fd1498Szrj 	 to use at least all call-saved registers.  */
2986*38fd1498Szrj       if (SIBLING_CALL_P (insn))
2987*38fd1498Szrj 	reg_pending_barrier = TRUE_BARRIER;
2988*38fd1498Szrj       else if (find_reg_note (insn, REG_SETJMP, NULL))
2989*38fd1498Szrj 	reg_pending_barrier = MOVE_BARRIER;
2990*38fd1498Szrj     }
2991*38fd1498Szrj 
2992*38fd1498Szrj   if (JUMP_P (insn))
2993*38fd1498Szrj     {
2994*38fd1498Szrj       rtx_insn *next = next_nonnote_nondebug_insn (insn);
2995*38fd1498Szrj       if (next && BARRIER_P (next))
2996*38fd1498Szrj 	reg_pending_barrier = MOVE_BARRIER;
2997*38fd1498Szrj       else
2998*38fd1498Szrj 	{
2999*38fd1498Szrj 	  rtx_insn_list *pending;
3000*38fd1498Szrj 	  rtx_expr_list *pending_mem;
3001*38fd1498Szrj 
3002*38fd1498Szrj           if (sched_deps_info->compute_jump_reg_dependencies)
3003*38fd1498Szrj             {
3004*38fd1498Szrj               (*sched_deps_info->compute_jump_reg_dependencies)
3005*38fd1498Szrj 		(insn, reg_pending_control_uses);
3006*38fd1498Szrj 
3007*38fd1498Szrj               /* Make latency of jump equal to 0 by using anti-dependence.  */
3008*38fd1498Szrj               EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3009*38fd1498Szrj                 {
3010*38fd1498Szrj                   struct deps_reg *reg_last = &deps->reg_last[i];
3011*38fd1498Szrj                   add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
3012*38fd1498Szrj 				       false);
3013*38fd1498Szrj                   add_dependence_list (insn, reg_last->implicit_sets,
3014*38fd1498Szrj 				       0, REG_DEP_ANTI, false);
3015*38fd1498Szrj                   add_dependence_list (insn, reg_last->clobbers, 0,
3016*38fd1498Szrj 				       REG_DEP_ANTI, false);
3017*38fd1498Szrj                 }
3018*38fd1498Szrj             }
3019*38fd1498Szrj 
3020*38fd1498Szrj 	  /* All memory writes and volatile reads must happen before the
3021*38fd1498Szrj 	     jump.  Non-volatile reads must happen before the jump iff
3022*38fd1498Szrj 	     the result is needed by the above register used mask.  */
3023*38fd1498Szrj 
3024*38fd1498Szrj 	  pending = deps->pending_write_insns;
3025*38fd1498Szrj 	  pending_mem = deps->pending_write_mems;
3026*38fd1498Szrj 	  while (pending)
3027*38fd1498Szrj 	    {
3028*38fd1498Szrj 	      if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3029*38fd1498Szrj 		add_dependence (insn, pending->insn (),
3030*38fd1498Szrj 				REG_DEP_OUTPUT);
3031*38fd1498Szrj 	      pending = pending->next ();
3032*38fd1498Szrj 	      pending_mem = pending_mem->next ();
3033*38fd1498Szrj 	    }
3034*38fd1498Szrj 
3035*38fd1498Szrj 	  pending = deps->pending_read_insns;
3036*38fd1498Szrj 	  pending_mem = deps->pending_read_mems;
3037*38fd1498Szrj 	  while (pending)
3038*38fd1498Szrj 	    {
3039*38fd1498Szrj 	      if (MEM_VOLATILE_P (pending_mem->element ())
3040*38fd1498Szrj 		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3041*38fd1498Szrj 		add_dependence (insn, pending->insn (),
3042*38fd1498Szrj 				REG_DEP_OUTPUT);
3043*38fd1498Szrj 	      pending = pending->next ();
3044*38fd1498Szrj 	      pending_mem = pending_mem->next ();
3045*38fd1498Szrj 	    }
3046*38fd1498Szrj 
3047*38fd1498Szrj 	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
3048*38fd1498Szrj 			       REG_DEP_ANTI, true);
3049*38fd1498Szrj 	  add_dependence_list (insn, deps->pending_jump_insns, 1,
3050*38fd1498Szrj 			       REG_DEP_ANTI, true);
3051*38fd1498Szrj 	}
3052*38fd1498Szrj     }
3053*38fd1498Szrj 
3054*38fd1498Szrj   /* If this instruction can throw an exception, then moving it changes
3055*38fd1498Szrj      where block boundaries fall.  This is mighty confusing elsewhere.
3056*38fd1498Szrj      Therefore, prevent such an instruction from being moved.  Same for
3057*38fd1498Szrj      non-jump instructions that define block boundaries.
3058*38fd1498Szrj      ??? Unclear whether this is still necessary in EBB mode.  If not,
3059*38fd1498Szrj      add_branch_dependences should be adjusted for RGN mode instead.  */
3060*38fd1498Szrj   if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
3061*38fd1498Szrj       || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
3062*38fd1498Szrj     reg_pending_barrier = MOVE_BARRIER;
3063*38fd1498Szrj 
3064*38fd1498Szrj   if (sched_pressure != SCHED_PRESSURE_NONE)
3065*38fd1498Szrj     {
3066*38fd1498Szrj       setup_insn_reg_uses (deps, insn);
3067*38fd1498Szrj       init_insn_reg_pressure_info (insn);
3068*38fd1498Szrj     }
3069*38fd1498Szrj 
3070*38fd1498Szrj   /* Add register dependencies for insn.  */
3071*38fd1498Szrj   if (DEBUG_INSN_P (insn))
3072*38fd1498Szrj     {
3073*38fd1498Szrj       rtx_insn *prev = deps->last_debug_insn;
3074*38fd1498Szrj       rtx_insn_list *u;
3075*38fd1498Szrj 
3076*38fd1498Szrj       if (!deps->readonly)
3077*38fd1498Szrj 	deps->last_debug_insn = insn;
3078*38fd1498Szrj 
3079*38fd1498Szrj       if (prev)
3080*38fd1498Szrj 	add_dependence (insn, prev, REG_DEP_ANTI);
3081*38fd1498Szrj 
3082*38fd1498Szrj       add_dependence_list (insn, deps->last_function_call, 1,
3083*38fd1498Szrj 			   REG_DEP_ANTI, false);
3084*38fd1498Szrj 
3085*38fd1498Szrj       if (!sel_sched_p ())
3086*38fd1498Szrj 	for (u = deps->last_pending_memory_flush; u; u = u->next ())
3087*38fd1498Szrj 	  add_dependence (insn, u->insn (), REG_DEP_ANTI);
3088*38fd1498Szrj 
3089*38fd1498Szrj       EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3090*38fd1498Szrj 	{
3091*38fd1498Szrj 	  struct deps_reg *reg_last = &deps->reg_last[i];
3092*38fd1498Szrj 	  add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
3093*38fd1498Szrj 	  /* There's no point in making REG_DEP_CONTROL dependencies for
3094*38fd1498Szrj 	     debug insns.  */
3095*38fd1498Szrj 	  add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3096*38fd1498Szrj 			       false);
3097*38fd1498Szrj 
3098*38fd1498Szrj 	  if (!deps->readonly)
3099*38fd1498Szrj 	    reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3100*38fd1498Szrj 	}
3101*38fd1498Szrj       CLEAR_REG_SET (reg_pending_uses);
3102*38fd1498Szrj 
3103*38fd1498Szrj       /* Quite often, a debug insn will refer to stuff in the
3104*38fd1498Szrj 	 previous instruction, but the reason we want this
3105*38fd1498Szrj 	 dependency here is to make sure the scheduler doesn't
3106*38fd1498Szrj 	 gratuitously move a debug insn ahead.  This could dirty
3107*38fd1498Szrj 	 DF flags and cause additional analysis that wouldn't have
3108*38fd1498Szrj 	 occurred in compilation without debug insns, and such
3109*38fd1498Szrj 	 additional analysis can modify the generated code.  */
3110*38fd1498Szrj       prev = PREV_INSN (insn);
3111*38fd1498Szrj 
3112*38fd1498Szrj       if (prev && NONDEBUG_INSN_P (prev))
3113*38fd1498Szrj 	add_dependence (insn, prev, REG_DEP_ANTI);
3114*38fd1498Szrj     }
3115*38fd1498Szrj   else
3116*38fd1498Szrj     {
3117*38fd1498Szrj       regset_head set_or_clobbered;
3118*38fd1498Szrj 
3119*38fd1498Szrj       EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3120*38fd1498Szrj 	{
3121*38fd1498Szrj 	  struct deps_reg *reg_last = &deps->reg_last[i];
3122*38fd1498Szrj 	  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3123*38fd1498Szrj 	  add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3124*38fd1498Szrj 			       false);
3125*38fd1498Szrj 	  add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3126*38fd1498Szrj 			       false);
3127*38fd1498Szrj 
3128*38fd1498Szrj 	  if (!deps->readonly)
3129*38fd1498Szrj 	    {
3130*38fd1498Szrj 	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3131*38fd1498Szrj 	      reg_last->uses_length++;
3132*38fd1498Szrj 	    }
3133*38fd1498Szrj 	}
3134*38fd1498Szrj 
3135*38fd1498Szrj       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3136*38fd1498Szrj 	if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3137*38fd1498Szrj 	  {
3138*38fd1498Szrj 	    struct deps_reg *reg_last = &deps->reg_last[i];
3139*38fd1498Szrj 	    add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3140*38fd1498Szrj 	    add_dependence_list (insn, reg_last->implicit_sets, 0,
3141*38fd1498Szrj 				 REG_DEP_ANTI, false);
3142*38fd1498Szrj 	    add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3143*38fd1498Szrj 				 false);
3144*38fd1498Szrj 
3145*38fd1498Szrj 	    if (!deps->readonly)
3146*38fd1498Szrj 	      {
3147*38fd1498Szrj 		reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3148*38fd1498Szrj 		reg_last->uses_length++;
3149*38fd1498Szrj 	      }
3150*38fd1498Szrj 	  }
3151*38fd1498Szrj 
3152*38fd1498Szrj       if (targetm.sched.exposed_pipeline)
3153*38fd1498Szrj 	{
3154*38fd1498Szrj 	  INIT_REG_SET (&set_or_clobbered);
3155*38fd1498Szrj 	  bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3156*38fd1498Szrj 		      reg_pending_sets);
3157*38fd1498Szrj 	  EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3158*38fd1498Szrj 	    {
3159*38fd1498Szrj 	      struct deps_reg *reg_last = &deps->reg_last[i];
3160*38fd1498Szrj 	      rtx list;
3161*38fd1498Szrj 	      for (list = reg_last->uses; list; list = XEXP (list, 1))
3162*38fd1498Szrj 		{
3163*38fd1498Szrj 		  rtx other = XEXP (list, 0);
3164*38fd1498Szrj 		  if (INSN_CACHED_COND (other) != const_true_rtx
3165*38fd1498Szrj 		      && refers_to_regno_p (i, INSN_CACHED_COND (other)))
3166*38fd1498Szrj 		    INSN_CACHED_COND (other) = const_true_rtx;
3167*38fd1498Szrj 		}
3168*38fd1498Szrj 	    }
3169*38fd1498Szrj 	}
3170*38fd1498Szrj 
3171*38fd1498Szrj       /* If the current insn is conditional, we can't free any
3172*38fd1498Szrj 	 of the lists.  */
3173*38fd1498Szrj       if (sched_has_condition_p (insn))
3174*38fd1498Szrj 	{
3175*38fd1498Szrj 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3176*38fd1498Szrj 	    {
3177*38fd1498Szrj 	      struct deps_reg *reg_last = &deps->reg_last[i];
3178*38fd1498Szrj 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3179*38fd1498Szrj 				   false);
3180*38fd1498Szrj 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
3181*38fd1498Szrj 				   REG_DEP_ANTI, false);
3182*38fd1498Szrj 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3183*38fd1498Szrj 				   false);
3184*38fd1498Szrj 	      add_dependence_list (insn, reg_last->control_uses, 0,
3185*38fd1498Szrj 				   REG_DEP_CONTROL, false);
3186*38fd1498Szrj 
3187*38fd1498Szrj 	      if (!deps->readonly)
3188*38fd1498Szrj 		{
3189*38fd1498Szrj 		  reg_last->clobbers
3190*38fd1498Szrj 		    = alloc_INSN_LIST (insn, reg_last->clobbers);
3191*38fd1498Szrj 		  reg_last->clobbers_length++;
3192*38fd1498Szrj 		}
3193*38fd1498Szrj 	    }
3194*38fd1498Szrj 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3195*38fd1498Szrj 	    {
3196*38fd1498Szrj 	      struct deps_reg *reg_last = &deps->reg_last[i];
3197*38fd1498Szrj 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3198*38fd1498Szrj 				   false);
3199*38fd1498Szrj 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
3200*38fd1498Szrj 				   REG_DEP_ANTI, false);
3201*38fd1498Szrj 	      add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3202*38fd1498Szrj 				   false);
3203*38fd1498Szrj 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3204*38fd1498Szrj 				   false);
3205*38fd1498Szrj 	      add_dependence_list (insn, reg_last->control_uses, 0,
3206*38fd1498Szrj 				   REG_DEP_CONTROL, false);
3207*38fd1498Szrj 
3208*38fd1498Szrj 	      if (!deps->readonly)
3209*38fd1498Szrj 		reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3210*38fd1498Szrj 	    }
3211*38fd1498Szrj 	}
3212*38fd1498Szrj       else
3213*38fd1498Szrj 	{
3214*38fd1498Szrj 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3215*38fd1498Szrj 	    {
3216*38fd1498Szrj 	      struct deps_reg *reg_last = &deps->reg_last[i];
3217*38fd1498Szrj 	      if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
3218*38fd1498Szrj 		  || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
3219*38fd1498Szrj 		{
3220*38fd1498Szrj 		  add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3221*38fd1498Szrj 						REG_DEP_OUTPUT, false);
3222*38fd1498Szrj 		  add_dependence_list_and_free (deps, insn,
3223*38fd1498Szrj 						&reg_last->implicit_sets, 0,
3224*38fd1498Szrj 						REG_DEP_ANTI, false);
3225*38fd1498Szrj 		  add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3226*38fd1498Szrj 						REG_DEP_ANTI, false);
3227*38fd1498Szrj 		  add_dependence_list_and_free (deps, insn,
3228*38fd1498Szrj 						&reg_last->control_uses, 0,
3229*38fd1498Szrj 						REG_DEP_ANTI, false);
3230*38fd1498Szrj 		  add_dependence_list_and_free (deps, insn,
3231*38fd1498Szrj 						&reg_last->clobbers, 0,
3232*38fd1498Szrj 						REG_DEP_OUTPUT, false);
3233*38fd1498Szrj 
3234*38fd1498Szrj 		  if (!deps->readonly)
3235*38fd1498Szrj 		    {
3236*38fd1498Szrj 		      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3237*38fd1498Szrj 		      reg_last->clobbers_length = 0;
3238*38fd1498Szrj 		      reg_last->uses_length = 0;
3239*38fd1498Szrj 		    }
3240*38fd1498Szrj 		}
3241*38fd1498Szrj 	      else
3242*38fd1498Szrj 		{
3243*38fd1498Szrj 		  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3244*38fd1498Szrj 				       false);
3245*38fd1498Szrj 		  add_dependence_list (insn, reg_last->implicit_sets, 0,
3246*38fd1498Szrj 				       REG_DEP_ANTI, false);
3247*38fd1498Szrj 		  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3248*38fd1498Szrj 				       false);
3249*38fd1498Szrj 		  add_dependence_list (insn, reg_last->control_uses, 0,
3250*38fd1498Szrj 				       REG_DEP_CONTROL, false);
3251*38fd1498Szrj 		}
3252*38fd1498Szrj 
3253*38fd1498Szrj 	      if (!deps->readonly)
3254*38fd1498Szrj 		{
3255*38fd1498Szrj 		  reg_last->clobbers_length++;
3256*38fd1498Szrj 		  reg_last->clobbers
3257*38fd1498Szrj 		    = alloc_INSN_LIST (insn, reg_last->clobbers);
3258*38fd1498Szrj 		}
3259*38fd1498Szrj 	    }
3260*38fd1498Szrj 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3261*38fd1498Szrj 	    {
3262*38fd1498Szrj 	      struct deps_reg *reg_last = &deps->reg_last[i];
3263*38fd1498Szrj 
3264*38fd1498Szrj 	      add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3265*38fd1498Szrj 					    REG_DEP_OUTPUT, false);
3266*38fd1498Szrj 	      add_dependence_list_and_free (deps, insn,
3267*38fd1498Szrj 					    &reg_last->implicit_sets,
3268*38fd1498Szrj 					    0, REG_DEP_ANTI, false);
3269*38fd1498Szrj 	      add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3270*38fd1498Szrj 					    REG_DEP_OUTPUT, false);
3271*38fd1498Szrj 	      add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3272*38fd1498Szrj 					    REG_DEP_ANTI, false);
3273*38fd1498Szrj 	      add_dependence_list (insn, reg_last->control_uses, 0,
3274*38fd1498Szrj 				   REG_DEP_CONTROL, false);
3275*38fd1498Szrj 
3276*38fd1498Szrj 	      if (!deps->readonly)
3277*38fd1498Szrj 		{
3278*38fd1498Szrj 		  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3279*38fd1498Szrj 		  reg_last->uses_length = 0;
3280*38fd1498Szrj 		  reg_last->clobbers_length = 0;
3281*38fd1498Szrj 		}
3282*38fd1498Szrj 	    }
3283*38fd1498Szrj 	}
3284*38fd1498Szrj       if (!deps->readonly)
3285*38fd1498Szrj 	{
3286*38fd1498Szrj 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3287*38fd1498Szrj 	    {
3288*38fd1498Szrj 	      struct deps_reg *reg_last = &deps->reg_last[i];
3289*38fd1498Szrj 	      reg_last->control_uses
3290*38fd1498Szrj 		= alloc_INSN_LIST (insn, reg_last->control_uses);
3291*38fd1498Szrj 	    }
3292*38fd1498Szrj 	}
3293*38fd1498Szrj     }
3294*38fd1498Szrj 
3295*38fd1498Szrj   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3296*38fd1498Szrj     if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3297*38fd1498Szrj       {
3298*38fd1498Szrj 	struct deps_reg *reg_last = &deps->reg_last[i];
3299*38fd1498Szrj 	add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3300*38fd1498Szrj 	add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3301*38fd1498Szrj 	add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3302*38fd1498Szrj 	add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3303*38fd1498Szrj 			     false);
3304*38fd1498Szrj 
3305*38fd1498Szrj 	if (!deps->readonly)
3306*38fd1498Szrj 	  reg_last->implicit_sets
3307*38fd1498Szrj 	    = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3308*38fd1498Szrj       }
3309*38fd1498Szrj 
3310*38fd1498Szrj   if (!deps->readonly)
3311*38fd1498Szrj     {
3312*38fd1498Szrj       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3313*38fd1498Szrj       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3314*38fd1498Szrj       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3315*38fd1498Szrj       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3316*38fd1498Szrj 	if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3317*38fd1498Szrj 	    || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3318*38fd1498Szrj 	  SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3319*38fd1498Szrj 
3320*38fd1498Szrj       /* Set up the pending barrier found.  */
3321*38fd1498Szrj       deps->last_reg_pending_barrier = reg_pending_barrier;
3322*38fd1498Szrj     }
3323*38fd1498Szrj 
3324*38fd1498Szrj   CLEAR_REG_SET (reg_pending_uses);
3325*38fd1498Szrj   CLEAR_REG_SET (reg_pending_clobbers);
3326*38fd1498Szrj   CLEAR_REG_SET (reg_pending_sets);
3327*38fd1498Szrj   CLEAR_REG_SET (reg_pending_control_uses);
3328*38fd1498Szrj   CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3329*38fd1498Szrj   CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3330*38fd1498Szrj 
3331*38fd1498Szrj   /* Add dependencies if a scheduling barrier was found.  */
3332*38fd1498Szrj   if (reg_pending_barrier)
3333*38fd1498Szrj     {
3334*38fd1498Szrj       /* In the case of barrier the most added dependencies are not
3335*38fd1498Szrj          real, so we use anti-dependence here.  */
3336*38fd1498Szrj       if (sched_has_condition_p (insn))
3337*38fd1498Szrj 	{
3338*38fd1498Szrj 	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3339*38fd1498Szrj 	    {
3340*38fd1498Szrj 	      struct deps_reg *reg_last = &deps->reg_last[i];
3341*38fd1498Szrj 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3342*38fd1498Szrj 				   true);
3343*38fd1498Szrj 	      add_dependence_list (insn, reg_last->sets, 0,
3344*38fd1498Szrj 				   reg_pending_barrier == TRUE_BARRIER
3345*38fd1498Szrj 				   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3346*38fd1498Szrj 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
3347*38fd1498Szrj 				   REG_DEP_ANTI, true);
3348*38fd1498Szrj 	      add_dependence_list (insn, reg_last->clobbers, 0,
3349*38fd1498Szrj 				   reg_pending_barrier == TRUE_BARRIER
3350*38fd1498Szrj 				   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3351*38fd1498Szrj 	    }
3352*38fd1498Szrj 	}
3353*38fd1498Szrj       else
3354*38fd1498Szrj 	{
3355*38fd1498Szrj 	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3356*38fd1498Szrj 	    {
3357*38fd1498Szrj 	      struct deps_reg *reg_last = &deps->reg_last[i];
3358*38fd1498Szrj 	      add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3359*38fd1498Szrj 					    REG_DEP_ANTI, true);
3360*38fd1498Szrj 	      add_dependence_list_and_free (deps, insn,
3361*38fd1498Szrj 					    &reg_last->control_uses, 0,
3362*38fd1498Szrj 					    REG_DEP_CONTROL, true);
3363*38fd1498Szrj 	      add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3364*38fd1498Szrj 					    reg_pending_barrier == TRUE_BARRIER
3365*38fd1498Szrj 					    ? REG_DEP_TRUE : REG_DEP_ANTI,
3366*38fd1498Szrj 					    true);
3367*38fd1498Szrj 	      add_dependence_list_and_free (deps, insn,
3368*38fd1498Szrj 					    &reg_last->implicit_sets, 0,
3369*38fd1498Szrj 					    REG_DEP_ANTI, true);
3370*38fd1498Szrj 	      add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3371*38fd1498Szrj 					    reg_pending_barrier == TRUE_BARRIER
3372*38fd1498Szrj 					    ? REG_DEP_TRUE : REG_DEP_ANTI,
3373*38fd1498Szrj 					    true);
3374*38fd1498Szrj 
3375*38fd1498Szrj               if (!deps->readonly)
3376*38fd1498Szrj                 {
3377*38fd1498Szrj                   reg_last->uses_length = 0;
3378*38fd1498Szrj                   reg_last->clobbers_length = 0;
3379*38fd1498Szrj                 }
3380*38fd1498Szrj 	    }
3381*38fd1498Szrj 	}
3382*38fd1498Szrj 
3383*38fd1498Szrj       if (!deps->readonly)
3384*38fd1498Szrj         for (i = 0; i < (unsigned)deps->max_reg; i++)
3385*38fd1498Szrj           {
3386*38fd1498Szrj             struct deps_reg *reg_last = &deps->reg_last[i];
3387*38fd1498Szrj             reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3388*38fd1498Szrj             SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3389*38fd1498Szrj           }
3390*38fd1498Szrj 
3391*38fd1498Szrj       /* Don't flush pending lists on speculative checks for
3392*38fd1498Szrj 	 selective scheduling.  */
3393*38fd1498Szrj       if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
3394*38fd1498Szrj 	flush_pending_lists (deps, insn, true, true);
3395*38fd1498Szrj 
3396*38fd1498Szrj       reg_pending_barrier = NOT_A_BARRIER;
3397*38fd1498Szrj     }
3398*38fd1498Szrj 
3399*38fd1498Szrj   /* If a post-call group is still open, see if it should remain so.
3400*38fd1498Szrj      This insn must be a simple move of a hard reg to a pseudo or
3401*38fd1498Szrj      vice-versa.
3402*38fd1498Szrj 
3403*38fd1498Szrj      We must avoid moving these insns for correctness on targets
3404*38fd1498Szrj      with small register classes, and for special registers like
3405*38fd1498Szrj      PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
3406*38fd1498Szrj      hard regs for all targets.  */
3407*38fd1498Szrj 
3408*38fd1498Szrj   if (deps->in_post_call_group_p)
3409*38fd1498Szrj     {
3410*38fd1498Szrj       rtx tmp, set = single_set (insn);
3411*38fd1498Szrj       int src_regno, dest_regno;
3412*38fd1498Szrj 
3413*38fd1498Szrj       if (set == NULL)
3414*38fd1498Szrj 	{
3415*38fd1498Szrj 	  if (DEBUG_INSN_P (insn))
3416*38fd1498Szrj 	    /* We don't want to mark debug insns as part of the same
3417*38fd1498Szrj 	       sched group.  We know they really aren't, but if we use
3418*38fd1498Szrj 	       debug insns to tell that a call group is over, we'll
3419*38fd1498Szrj 	       get different code if debug insns are not there and
3420*38fd1498Szrj 	       instructions that follow seem like they should be part
3421*38fd1498Szrj 	       of the call group.
3422*38fd1498Szrj 
3423*38fd1498Szrj 	       Also, if we did, chain_to_prev_insn would move the
3424*38fd1498Szrj 	       deps of the debug insn to the call insn, modifying
3425*38fd1498Szrj 	       non-debug post-dependency counts of the debug insn
3426*38fd1498Szrj 	       dependencies and otherwise messing with the scheduling
3427*38fd1498Szrj 	       order.
3428*38fd1498Szrj 
3429*38fd1498Szrj 	       Instead, let such debug insns be scheduled freely, but
3430*38fd1498Szrj 	       keep the call group open in case there are insns that
3431*38fd1498Szrj 	       should be part of it afterwards.  Since we grant debug
3432*38fd1498Szrj 	       insns higher priority than even sched group insns, it
3433*38fd1498Szrj 	       will all turn out all right.  */
3434*38fd1498Szrj 	    goto debug_dont_end_call_group;
3435*38fd1498Szrj 	  else
3436*38fd1498Szrj 	    goto end_call_group;
3437*38fd1498Szrj 	}
3438*38fd1498Szrj 
3439*38fd1498Szrj       tmp = SET_DEST (set);
3440*38fd1498Szrj       if (GET_CODE (tmp) == SUBREG)
3441*38fd1498Szrj 	tmp = SUBREG_REG (tmp);
3442*38fd1498Szrj       if (REG_P (tmp))
3443*38fd1498Szrj 	dest_regno = REGNO (tmp);
3444*38fd1498Szrj       else
3445*38fd1498Szrj 	goto end_call_group;
3446*38fd1498Szrj 
3447*38fd1498Szrj       tmp = SET_SRC (set);
3448*38fd1498Szrj       if (GET_CODE (tmp) == SUBREG)
3449*38fd1498Szrj 	tmp = SUBREG_REG (tmp);
3450*38fd1498Szrj       if ((GET_CODE (tmp) == PLUS
3451*38fd1498Szrj 	   || GET_CODE (tmp) == MINUS)
3452*38fd1498Szrj 	  && REG_P (XEXP (tmp, 0))
3453*38fd1498Szrj 	  && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3454*38fd1498Szrj 	  && dest_regno == STACK_POINTER_REGNUM)
3455*38fd1498Szrj 	src_regno = STACK_POINTER_REGNUM;
3456*38fd1498Szrj       else if (REG_P (tmp))
3457*38fd1498Szrj 	src_regno = REGNO (tmp);
3458*38fd1498Szrj       else
3459*38fd1498Szrj 	goto end_call_group;
3460*38fd1498Szrj 
3461*38fd1498Szrj       if (src_regno < FIRST_PSEUDO_REGISTER
3462*38fd1498Szrj 	  || dest_regno < FIRST_PSEUDO_REGISTER)
3463*38fd1498Szrj 	{
3464*38fd1498Szrj 	  if (!deps->readonly
3465*38fd1498Szrj               && deps->in_post_call_group_p == post_call_initial)
3466*38fd1498Szrj 	    deps->in_post_call_group_p = post_call;
3467*38fd1498Szrj 
3468*38fd1498Szrj           if (!sel_sched_p () || sched_emulate_haifa_p)
3469*38fd1498Szrj             {
3470*38fd1498Szrj               SCHED_GROUP_P (insn) = 1;
3471*38fd1498Szrj               CANT_MOVE (insn) = 1;
3472*38fd1498Szrj             }
3473*38fd1498Szrj 	}
3474*38fd1498Szrj       else
3475*38fd1498Szrj 	{
3476*38fd1498Szrj 	end_call_group:
3477*38fd1498Szrj           if (!deps->readonly)
3478*38fd1498Szrj             deps->in_post_call_group_p = not_post_call;
3479*38fd1498Szrj 	}
3480*38fd1498Szrj     }
3481*38fd1498Szrj 
3482*38fd1498Szrj  debug_dont_end_call_group:
3483*38fd1498Szrj   if ((current_sched_info->flags & DO_SPECULATION)
3484*38fd1498Szrj       && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3485*38fd1498Szrj     /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3486*38fd1498Szrj        be speculated.  */
3487*38fd1498Szrj     {
3488*38fd1498Szrj       if (sel_sched_p ())
3489*38fd1498Szrj         sel_mark_hard_insn (insn);
3490*38fd1498Szrj       else
3491*38fd1498Szrj         {
3492*38fd1498Szrj           sd_iterator_def sd_it;
3493*38fd1498Szrj           dep_t dep;
3494*38fd1498Szrj 
3495*38fd1498Szrj           for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3496*38fd1498Szrj                sd_iterator_cond (&sd_it, &dep);)
3497*38fd1498Szrj             change_spec_dep_to_hard (sd_it);
3498*38fd1498Szrj         }
3499*38fd1498Szrj     }
3500*38fd1498Szrj 
3501*38fd1498Szrj   /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
3502*38fd1498Szrj      honor their original ordering.  */
3503*38fd1498Szrj   if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
3504*38fd1498Szrj     {
3505*38fd1498Szrj       if (deps->last_args_size)
3506*38fd1498Szrj 	add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
3507*38fd1498Szrj       if (!deps->readonly)
3508*38fd1498Szrj 	deps->last_args_size = insn;
3509*38fd1498Szrj     }
3510*38fd1498Szrj 
3511*38fd1498Szrj   /* We must not mix prologue and epilogue insns.  See PR78029.  */
3512*38fd1498Szrj   if (prologue_contains (insn))
3513*38fd1498Szrj     {
3514*38fd1498Szrj       add_dependence_list (insn, deps->last_epilogue, true, REG_DEP_ANTI, true);
3515*38fd1498Szrj       if (!deps->readonly)
3516*38fd1498Szrj 	{
3517*38fd1498Szrj 	  if (deps->last_logue_was_epilogue)
3518*38fd1498Szrj 	    free_INSN_LIST_list (&deps->last_prologue);
3519*38fd1498Szrj 	  deps->last_prologue = alloc_INSN_LIST (insn, deps->last_prologue);
3520*38fd1498Szrj 	  deps->last_logue_was_epilogue = false;
3521*38fd1498Szrj 	}
3522*38fd1498Szrj     }
3523*38fd1498Szrj 
3524*38fd1498Szrj   if (epilogue_contains (insn))
3525*38fd1498Szrj     {
3526*38fd1498Szrj       add_dependence_list (insn, deps->last_prologue, true, REG_DEP_ANTI, true);
3527*38fd1498Szrj       if (!deps->readonly)
3528*38fd1498Szrj 	{
3529*38fd1498Szrj 	  if (!deps->last_logue_was_epilogue)
3530*38fd1498Szrj 	    free_INSN_LIST_list (&deps->last_epilogue);
3531*38fd1498Szrj 	  deps->last_epilogue = alloc_INSN_LIST (insn, deps->last_epilogue);
3532*38fd1498Szrj 	  deps->last_logue_was_epilogue = true;
3533*38fd1498Szrj 	}
3534*38fd1498Szrj     }
3535*38fd1498Szrj }
3536*38fd1498Szrj 
3537*38fd1498Szrj /* Return TRUE if INSN might not always return normally (e.g. call exit,
3538*38fd1498Szrj    longjmp, loop forever, ...).  */
3539*38fd1498Szrj /* FIXME: Why can't this function just use flags_from_decl_or_type and
3540*38fd1498Szrj    test for ECF_NORETURN?  */
3541*38fd1498Szrj static bool
call_may_noreturn_p(rtx_insn * insn)3542*38fd1498Szrj call_may_noreturn_p (rtx_insn *insn)
3543*38fd1498Szrj {
3544*38fd1498Szrj   rtx call;
3545*38fd1498Szrj 
3546*38fd1498Szrj   /* const or pure calls that aren't looping will always return.  */
3547*38fd1498Szrj   if (RTL_CONST_OR_PURE_CALL_P (insn)
3548*38fd1498Szrj       && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3549*38fd1498Szrj     return false;
3550*38fd1498Szrj 
3551*38fd1498Szrj   call = get_call_rtx_from (insn);
3552*38fd1498Szrj   if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3553*38fd1498Szrj     {
3554*38fd1498Szrj       rtx symbol = XEXP (XEXP (call, 0), 0);
3555*38fd1498Szrj       if (SYMBOL_REF_DECL (symbol)
3556*38fd1498Szrj 	  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3557*38fd1498Szrj 	{
3558*38fd1498Szrj 	  if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3559*38fd1498Szrj 	      == BUILT_IN_NORMAL)
3560*38fd1498Szrj 	    switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3561*38fd1498Szrj 	      {
3562*38fd1498Szrj 	      case BUILT_IN_BCMP:
3563*38fd1498Szrj 	      case BUILT_IN_BCOPY:
3564*38fd1498Szrj 	      case BUILT_IN_BZERO:
3565*38fd1498Szrj 	      case BUILT_IN_INDEX:
3566*38fd1498Szrj 	      case BUILT_IN_MEMCHR:
3567*38fd1498Szrj 	      case BUILT_IN_MEMCMP:
3568*38fd1498Szrj 	      case BUILT_IN_MEMCPY:
3569*38fd1498Szrj 	      case BUILT_IN_MEMMOVE:
3570*38fd1498Szrj 	      case BUILT_IN_MEMPCPY:
3571*38fd1498Szrj 	      case BUILT_IN_MEMSET:
3572*38fd1498Szrj 	      case BUILT_IN_RINDEX:
3573*38fd1498Szrj 	      case BUILT_IN_STPCPY:
3574*38fd1498Szrj 	      case BUILT_IN_STPNCPY:
3575*38fd1498Szrj 	      case BUILT_IN_STRCAT:
3576*38fd1498Szrj 	      case BUILT_IN_STRCHR:
3577*38fd1498Szrj 	      case BUILT_IN_STRCMP:
3578*38fd1498Szrj 	      case BUILT_IN_STRCPY:
3579*38fd1498Szrj 	      case BUILT_IN_STRCSPN:
3580*38fd1498Szrj 	      case BUILT_IN_STRLEN:
3581*38fd1498Szrj 	      case BUILT_IN_STRNCAT:
3582*38fd1498Szrj 	      case BUILT_IN_STRNCMP:
3583*38fd1498Szrj 	      case BUILT_IN_STRNCPY:
3584*38fd1498Szrj 	      case BUILT_IN_STRPBRK:
3585*38fd1498Szrj 	      case BUILT_IN_STRRCHR:
3586*38fd1498Szrj 	      case BUILT_IN_STRSPN:
3587*38fd1498Szrj 	      case BUILT_IN_STRSTR:
3588*38fd1498Szrj 		/* Assume certain string/memory builtins always return.  */
3589*38fd1498Szrj 		return false;
3590*38fd1498Szrj 	      default:
3591*38fd1498Szrj 		break;
3592*38fd1498Szrj 	      }
3593*38fd1498Szrj 	}
3594*38fd1498Szrj     }
3595*38fd1498Szrj 
3596*38fd1498Szrj   /* For all other calls assume that they might not always return.  */
3597*38fd1498Szrj   return true;
3598*38fd1498Szrj }
3599*38fd1498Szrj 
3600*38fd1498Szrj /* Return true if INSN should be made dependent on the previous instruction
3601*38fd1498Szrj    group, and if all INSN's dependencies should be moved to the first
3602*38fd1498Szrj    instruction of that group.  */
3603*38fd1498Szrj 
3604*38fd1498Szrj static bool
chain_to_prev_insn_p(rtx_insn * insn)3605*38fd1498Szrj chain_to_prev_insn_p (rtx_insn *insn)
3606*38fd1498Szrj {
3607*38fd1498Szrj   /* INSN forms a group with the previous instruction.  */
3608*38fd1498Szrj   if (SCHED_GROUP_P (insn))
3609*38fd1498Szrj     return true;
3610*38fd1498Szrj 
3611*38fd1498Szrj   /* If the previous instruction clobbers a register R and this one sets
3612*38fd1498Szrj      part of R, the clobber was added specifically to help us track the
3613*38fd1498Szrj      liveness of R.  There's no point scheduling the clobber and leaving
3614*38fd1498Szrj      INSN behind, especially if we move the clobber to another block.  */
3615*38fd1498Szrj   rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
3616*38fd1498Szrj   if (prev
3617*38fd1498Szrj       && INSN_P (prev)
3618*38fd1498Szrj       && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
3619*38fd1498Szrj       && GET_CODE (PATTERN (prev)) == CLOBBER)
3620*38fd1498Szrj     {
3621*38fd1498Szrj       rtx x = XEXP (PATTERN (prev), 0);
3622*38fd1498Szrj       if (set_of (x, insn))
3623*38fd1498Szrj 	return true;
3624*38fd1498Szrj     }
3625*38fd1498Szrj 
3626*38fd1498Szrj   return false;
3627*38fd1498Szrj }
3628*38fd1498Szrj 
/* Analyze INSN with DEPS as a context.  Dispatches on the insn class
   (jump, plain/debug insn, or call) and records the backward dependencies
   it creates into DEPS via sched_analyze_insn.  */
void
deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
{
  /* Let the dependency-info hook see the insn before analysis starts.  */
  if (sched_deps_info->start_insn)
    sched_deps_info->start_insn (insn);

  /* Record the condition for this insn.  */
  if (NONDEBUG_INSN_P (insn))
    {
      rtx t;
      sched_get_condition_with_rev (insn, NULL);
      t = INSN_CACHED_COND (insn);
      INSN_COND_DEPS (insn) = NULL;
      /* When predication is enabled after reload, remember the insns that
	 last set, clobbered, or implicitly set the hard registers the
	 condition reads, so a predicated move cannot cross them.  */
      if (reload_completed
	  && (current_sched_info->flags & DO_PREDICATION)
	  && COMPARISON_P (t)
	  && REG_P (XEXP (t, 0))
	  && CONSTANT_P (XEXP (t, 1)))
	{
	  unsigned int regno;
	  int nregs;
	  rtx_insn_list *cond_deps = NULL;
	  t = XEXP (t, 0);
	  regno = REGNO (t);
	  nregs = REG_NREGS (t);
	  /* Walk every hard register covered by the condition operand.  */
	  while (nregs-- > 0)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
	      cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
	      cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
	      cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
	    }
	  INSN_COND_DEPS (insn) = cond_deps;
	}
    }

  if (JUMP_P (insn))
    {
      /* Make each JUMP_INSN (but not a speculative check)
         a scheduling barrier for memory references.  */
      if (!deps->readonly
          && !(sel_sched_p ()
               && sel_insn_is_speculation_check (insn)))
        {
          /* Keep the list a reasonable size.  */
          if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
            flush_pending_lists (deps, insn, true, true);
          else
	    deps->pending_jump_insns
              = alloc_INSN_LIST (insn, deps->pending_jump_insns);
        }

      /* For each insn which shouldn't cross a jump, add a dependence.  */
      add_dependence_list_and_free (deps, insn,
				    &deps->sched_before_next_jump, 1,
				    REG_DEP_ANTI, true);

      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
    {
      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (CALL_P (insn))
    {
      int i;

      CANT_MOVE (insn) = 1;

      if (find_reg_note (insn, REG_SETJMP, NULL))
        {
          /* This is setjmp.  Assume that all registers, not just
             hard registers, may be clobbered by this call.  */
          reg_pending_barrier = MOVE_BARRIER;
        }
      else
        {
	  /* Classify every hard register for its behavior across the call.
	     NOTE: the whole brace-less if/else-if chain below is the body
	     of this for loop; the block comments between the branches each
	     describe the "else if" that follows them.  */
          for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
            /* A call may read and modify global register variables.  */
            if (global_regs[i])
              {
                SET_REGNO_REG_SET (reg_pending_sets, i);
                SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
              }
          /* Other call-clobbered hard regs may be clobbered.
             Since we only have a choice between 'might be clobbered'
             and 'definitely not clobbered', we must include all
             partly call-clobbered registers here.  */
	    else if (targetm.hard_regno_call_part_clobbered (i,
							     reg_raw_mode[i])
                     || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
              SET_REGNO_REG_SET (reg_pending_clobbers, i);
          /* We don't know what set of fixed registers might be used
             by the function, but it is certain that the stack pointer
             is among them, but be conservative.  */
            else if (fixed_regs[i])
	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
          /* The frame pointer is normally not used by the function
             itself, but by the debugger.  */
          /* ??? MIPS o32 is an exception.  It uses the frame pointer
             in the macro expansion of jal but does not represent this
             fact in the call_insn rtl.  */
            else if (i == FRAME_POINTER_REGNUM
                     || (i == HARD_FRAME_POINTER_REGNUM
                         && (! reload_completed || frame_pointer_needed)))
	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
        }

      /* For each insn which shouldn't cross a call, add a dependence
         between that insn and this call insn.  */
      add_dependence_list_and_free (deps, insn,
                                    &deps->sched_before_next_call, 1,
                                    REG_DEP_ANTI, true);

      sched_analyze_insn (deps, PATTERN (insn), insn);

      /* If CALL would be in a sched group, then this will violate
	 convention that sched group insns have dependencies only on the
	 previous instruction.

	 Of course one can say: "Hey!  What about head of the sched group?"
	 And I will answer: "Basic principles (one dep per insn) are always
	 the same."  */
      gcc_assert (!SCHED_GROUP_P (insn));

      /* In the absence of interprocedural alias analysis, we must flush
         all pending reads and writes, and start new dependencies starting
         from here.  But only flush writes for constant calls (which may
         be passed a pointer to something we haven't written yet).  */
      flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));

      if (!deps->readonly)
        {
          /* Remember the last function call for limiting lifetimes.  */
          free_INSN_LIST_list (&deps->last_function_call);
          deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

	  if (call_may_noreturn_p (insn))
	    {
	      /* Remember the last function call that might not always return
		 normally for limiting moves of trapping insns.  */
	      free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
	      deps->last_function_call_may_noreturn
		= alloc_INSN_LIST (insn, NULL_RTX);
	    }

          /* Before reload, begin a post-call group, so as to keep the
             lifetimes of hard registers correct.  */
          if (! reload_completed)
            deps->in_post_call_group_p = post_call;
        }
    }

  /* Keep cselib's view of memory in sync with the insn stream.  */
  if (sched_deps_info->use_cselib)
    cselib_process_insn (insn);

  if (sched_deps_info->finish_insn)
    sched_deps_info->finish_insn ();

  /* Fixup the dependencies in the sched group.  */
  if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
      && chain_to_prev_insn_p (insn)
      && !sel_sched_p ())
    chain_to_prev_insn (insn);
}
3795*38fd1498Szrj 
3796*38fd1498Szrj /* Initialize DEPS for the new block beginning with HEAD.  */
3797*38fd1498Szrj void
deps_start_bb(struct deps_desc * deps,rtx_insn * head)3798*38fd1498Szrj deps_start_bb (struct deps_desc *deps, rtx_insn *head)
3799*38fd1498Szrj {
3800*38fd1498Szrj   gcc_assert (!deps->readonly);
3801*38fd1498Szrj 
3802*38fd1498Szrj   /* Before reload, if the previous block ended in a call, show that
3803*38fd1498Szrj      we are inside a post-call group, so as to keep the lifetimes of
3804*38fd1498Szrj      hard registers correct.  */
3805*38fd1498Szrj   if (! reload_completed && !LABEL_P (head))
3806*38fd1498Szrj     {
3807*38fd1498Szrj       rtx_insn *insn = prev_nonnote_nondebug_insn (head);
3808*38fd1498Szrj 
3809*38fd1498Szrj       if (insn && CALL_P (insn))
3810*38fd1498Szrj 	deps->in_post_call_group_p = post_call_initial;
3811*38fd1498Szrj     }
3812*38fd1498Szrj }
3813*38fd1498Szrj 
3814*38fd1498Szrj /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3815*38fd1498Szrj    dependencies for each insn.  */
3816*38fd1498Szrj void
sched_analyze(struct deps_desc * deps,rtx_insn * head,rtx_insn * tail)3817*38fd1498Szrj sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
3818*38fd1498Szrj {
3819*38fd1498Szrj   rtx_insn *insn;
3820*38fd1498Szrj 
3821*38fd1498Szrj   if (sched_deps_info->use_cselib)
3822*38fd1498Szrj     cselib_init (CSELIB_RECORD_MEMORY);
3823*38fd1498Szrj 
3824*38fd1498Szrj   deps_start_bb (deps, head);
3825*38fd1498Szrj 
3826*38fd1498Szrj   for (insn = head;; insn = NEXT_INSN (insn))
3827*38fd1498Szrj     {
3828*38fd1498Szrj 
3829*38fd1498Szrj       if (INSN_P (insn))
3830*38fd1498Szrj 	{
3831*38fd1498Szrj 	  /* And initialize deps_lists.  */
3832*38fd1498Szrj 	  sd_init_insn (insn);
3833*38fd1498Szrj 	  /* Clean up SCHED_GROUP_P which may be set by last
3834*38fd1498Szrj 	     scheduler pass.  */
3835*38fd1498Szrj 	  if (SCHED_GROUP_P (insn))
3836*38fd1498Szrj 	    SCHED_GROUP_P (insn) = 0;
3837*38fd1498Szrj 	}
3838*38fd1498Szrj 
3839*38fd1498Szrj       deps_analyze_insn (deps, insn);
3840*38fd1498Szrj 
3841*38fd1498Szrj       if (insn == tail)
3842*38fd1498Szrj 	{
3843*38fd1498Szrj 	  if (sched_deps_info->use_cselib)
3844*38fd1498Szrj 	    cselib_finish ();
3845*38fd1498Szrj 	  return;
3846*38fd1498Szrj 	}
3847*38fd1498Szrj     }
3848*38fd1498Szrj   gcc_unreachable ();
3849*38fd1498Szrj }
3850*38fd1498Szrj 
3851*38fd1498Szrj /* Helper for sched_free_deps ().
3852*38fd1498Szrj    Delete INSN's (RESOLVED_P) backward dependencies.  */
3853*38fd1498Szrj static void
delete_dep_nodes_in_back_deps(rtx_insn * insn,bool resolved_p)3854*38fd1498Szrj delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
3855*38fd1498Szrj {
3856*38fd1498Szrj   sd_iterator_def sd_it;
3857*38fd1498Szrj   dep_t dep;
3858*38fd1498Szrj   sd_list_types_def types;
3859*38fd1498Szrj 
3860*38fd1498Szrj   if (resolved_p)
3861*38fd1498Szrj     types = SD_LIST_RES_BACK;
3862*38fd1498Szrj   else
3863*38fd1498Szrj     types = SD_LIST_BACK;
3864*38fd1498Szrj 
3865*38fd1498Szrj   for (sd_it = sd_iterator_start (insn, types);
3866*38fd1498Szrj        sd_iterator_cond (&sd_it, &dep);)
3867*38fd1498Szrj     {
3868*38fd1498Szrj       dep_link_t link = *sd_it.linkp;
3869*38fd1498Szrj       dep_node_t node = DEP_LINK_NODE (link);
3870*38fd1498Szrj       deps_list_t back_list;
3871*38fd1498Szrj       deps_list_t forw_list;
3872*38fd1498Szrj 
3873*38fd1498Szrj       get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3874*38fd1498Szrj       remove_from_deps_list (link, back_list);
3875*38fd1498Szrj       delete_dep_node (node);
3876*38fd1498Szrj     }
3877*38fd1498Szrj }
3878*38fd1498Szrj 
3879*38fd1498Szrj /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3880*38fd1498Szrj    deps_lists.  */
3881*38fd1498Szrj void
sched_free_deps(rtx_insn * head,rtx_insn * tail,bool resolved_p)3882*38fd1498Szrj sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
3883*38fd1498Szrj {
3884*38fd1498Szrj   rtx_insn *insn;
3885*38fd1498Szrj   rtx_insn *next_tail = NEXT_INSN (tail);
3886*38fd1498Szrj 
3887*38fd1498Szrj   /* We make two passes since some insns may be scheduled before their
3888*38fd1498Szrj      dependencies are resolved.  */
3889*38fd1498Szrj   for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3890*38fd1498Szrj     if (INSN_P (insn) && INSN_LUID (insn) > 0)
3891*38fd1498Szrj       {
3892*38fd1498Szrj 	/* Clear forward deps and leave the dep_nodes to the
3893*38fd1498Szrj 	   corresponding back_deps list.  */
3894*38fd1498Szrj 	if (resolved_p)
3895*38fd1498Szrj 	  clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3896*38fd1498Szrj 	else
3897*38fd1498Szrj 	  clear_deps_list (INSN_FORW_DEPS (insn));
3898*38fd1498Szrj       }
3899*38fd1498Szrj   for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3900*38fd1498Szrj     if (INSN_P (insn) && INSN_LUID (insn) > 0)
3901*38fd1498Szrj       {
3902*38fd1498Szrj 	/* Clear resolved back deps together with its dep_nodes.  */
3903*38fd1498Szrj 	delete_dep_nodes_in_back_deps (insn, resolved_p);
3904*38fd1498Szrj 
3905*38fd1498Szrj 	sd_finish_insn (insn);
3906*38fd1498Szrj       }
3907*38fd1498Szrj }
3908*38fd1498Szrj 
3909*38fd1498Szrj /* Initialize variables for region data dependence analysis.
3910*38fd1498Szrj    When LAZY_REG_LAST is true, do not allocate reg_last array
3911*38fd1498Szrj    of struct deps_desc immediately.  */
3912*38fd1498Szrj 
3913*38fd1498Szrj void
init_deps(struct deps_desc * deps,bool lazy_reg_last)3914*38fd1498Szrj init_deps (struct deps_desc *deps, bool lazy_reg_last)
3915*38fd1498Szrj {
3916*38fd1498Szrj   int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3917*38fd1498Szrj 
3918*38fd1498Szrj   deps->max_reg = max_reg;
3919*38fd1498Szrj   if (lazy_reg_last)
3920*38fd1498Szrj     deps->reg_last = NULL;
3921*38fd1498Szrj   else
3922*38fd1498Szrj     deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3923*38fd1498Szrj   INIT_REG_SET (&deps->reg_last_in_use);
3924*38fd1498Szrj 
3925*38fd1498Szrj   deps->pending_read_insns = 0;
3926*38fd1498Szrj   deps->pending_read_mems = 0;
3927*38fd1498Szrj   deps->pending_write_insns = 0;
3928*38fd1498Szrj   deps->pending_write_mems = 0;
3929*38fd1498Szrj   deps->pending_jump_insns = 0;
3930*38fd1498Szrj   deps->pending_read_list_length = 0;
3931*38fd1498Szrj   deps->pending_write_list_length = 0;
3932*38fd1498Szrj   deps->pending_flush_length = 0;
3933*38fd1498Szrj   deps->last_pending_memory_flush = 0;
3934*38fd1498Szrj   deps->last_function_call = 0;
3935*38fd1498Szrj   deps->last_function_call_may_noreturn = 0;
3936*38fd1498Szrj   deps->sched_before_next_call = 0;
3937*38fd1498Szrj   deps->sched_before_next_jump = 0;
3938*38fd1498Szrj   deps->in_post_call_group_p = not_post_call;
3939*38fd1498Szrj   deps->last_debug_insn = 0;
3940*38fd1498Szrj   deps->last_args_size = 0;
3941*38fd1498Szrj   deps->last_prologue = 0;
3942*38fd1498Szrj   deps->last_epilogue = 0;
3943*38fd1498Szrj   deps->last_logue_was_epilogue = false;
3944*38fd1498Szrj   deps->last_reg_pending_barrier = NOT_A_BARRIER;
3945*38fd1498Szrj   deps->readonly = 0;
3946*38fd1498Szrj }
3947*38fd1498Szrj 
3948*38fd1498Szrj /* Init only reg_last field of DEPS, which was not allocated before as
3949*38fd1498Szrj    we inited DEPS lazily.  */
3950*38fd1498Szrj void
init_deps_reg_last(struct deps_desc * deps)3951*38fd1498Szrj init_deps_reg_last (struct deps_desc *deps)
3952*38fd1498Szrj {
3953*38fd1498Szrj   gcc_assert (deps && deps->max_reg > 0);
3954*38fd1498Szrj   gcc_assert (deps->reg_last == NULL);
3955*38fd1498Szrj 
3956*38fd1498Szrj   deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3957*38fd1498Szrj }
3958*38fd1498Szrj 
3959*38fd1498Szrj 
3960*38fd1498Szrj /* Free insn lists found in DEPS.  */
3961*38fd1498Szrj 
3962*38fd1498Szrj void
free_deps(struct deps_desc * deps)3963*38fd1498Szrj free_deps (struct deps_desc *deps)
3964*38fd1498Szrj {
3965*38fd1498Szrj   unsigned i;
3966*38fd1498Szrj   reg_set_iterator rsi;
3967*38fd1498Szrj 
3968*38fd1498Szrj   /* We set max_reg to 0 when this context was already freed.  */
3969*38fd1498Szrj   if (deps->max_reg == 0)
3970*38fd1498Szrj     {
3971*38fd1498Szrj       gcc_assert (deps->reg_last == NULL);
3972*38fd1498Szrj       return;
3973*38fd1498Szrj     }
3974*38fd1498Szrj   deps->max_reg = 0;
3975*38fd1498Szrj 
3976*38fd1498Szrj   free_INSN_LIST_list (&deps->pending_read_insns);
3977*38fd1498Szrj   free_EXPR_LIST_list (&deps->pending_read_mems);
3978*38fd1498Szrj   free_INSN_LIST_list (&deps->pending_write_insns);
3979*38fd1498Szrj   free_EXPR_LIST_list (&deps->pending_write_mems);
3980*38fd1498Szrj   free_INSN_LIST_list (&deps->last_pending_memory_flush);
3981*38fd1498Szrj 
3982*38fd1498Szrj   /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3983*38fd1498Szrj      times.  For a testcase with 42000 regs and 8000 small basic blocks,
3984*38fd1498Szrj      this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
3985*38fd1498Szrj   EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3986*38fd1498Szrj     {
3987*38fd1498Szrj       struct deps_reg *reg_last = &deps->reg_last[i];
3988*38fd1498Szrj       if (reg_last->uses)
3989*38fd1498Szrj 	free_INSN_LIST_list (&reg_last->uses);
3990*38fd1498Szrj       if (reg_last->sets)
3991*38fd1498Szrj 	free_INSN_LIST_list (&reg_last->sets);
3992*38fd1498Szrj       if (reg_last->implicit_sets)
3993*38fd1498Szrj 	free_INSN_LIST_list (&reg_last->implicit_sets);
3994*38fd1498Szrj       if (reg_last->control_uses)
3995*38fd1498Szrj 	free_INSN_LIST_list (&reg_last->control_uses);
3996*38fd1498Szrj       if (reg_last->clobbers)
3997*38fd1498Szrj 	free_INSN_LIST_list (&reg_last->clobbers);
3998*38fd1498Szrj     }
3999*38fd1498Szrj   CLEAR_REG_SET (&deps->reg_last_in_use);
4000*38fd1498Szrj 
4001*38fd1498Szrj   /* As we initialize reg_last lazily, it is possible that we didn't allocate
4002*38fd1498Szrj      it at all.  */
4003*38fd1498Szrj   free (deps->reg_last);
4004*38fd1498Szrj   deps->reg_last = NULL;
4005*38fd1498Szrj 
4006*38fd1498Szrj   deps = NULL;
4007*38fd1498Szrj }
4008*38fd1498Szrj 
/* Remove INSN from dependence contexts DEPS.  */
void
remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
{
  int removed;
  unsigned i;
  reg_set_iterator rsi;

  /* Drop INSN from the pending memory read and write lists, keeping the
     recorded list lengths in sync.  The read-list length is only adjusted
     for nondebug insns.  */
  removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
                                               &deps->pending_read_mems);
  if (!DEBUG_INSN_P (insn))
    deps->pending_read_list_length -= removed;
  removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
                                               &deps->pending_write_mems);
  deps->pending_write_list_length -= removed;

  /* Both pending jumps and pending memory flushes count towards
     pending_flush_length.  */
  removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
  deps->pending_flush_length -= removed;
  removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
  deps->pending_flush_length -= removed;

  /* Drop INSN from each in-use register's dependence lists.  A register
     whose lists all became empty is removed from reg_last_in_use, but the
     CLEAR_REGNO_REG_SET is deferred to the start of the next iteration
     (and once more after the loop) so the bitmap is never modified while
     the iterator is positioned on that bit.
     NOTE(review): reg_last->control_uses is not walked here, although the
     other four lists are - confirm that is intentional.  */
  unsigned to_clear = -1U;
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      if (to_clear != -1U)
	{
	  CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);
	  to_clear = -1U;
	}
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
	remove_from_dependence_list (insn, &reg_last->uses);
      if (reg_last->sets)
	remove_from_dependence_list (insn, &reg_last->sets);
      if (reg_last->implicit_sets)
	remove_from_dependence_list (insn, &reg_last->implicit_sets);
      if (reg_last->clobbers)
	remove_from_dependence_list (insn, &reg_last->clobbers);
      if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
	  && !reg_last->clobbers)
	to_clear = i;
    }
  if (to_clear != -1U)
    CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);

  /* Call insns are additionally recorded in the call-related lists.  */
  if (CALL_P (insn))
    {
      remove_from_dependence_list (insn, &deps->last_function_call);
      remove_from_dependence_list (insn,
				   &deps->last_function_call_may_noreturn);
    }
  remove_from_dependence_list (insn, &deps->sched_before_next_call);
}
4062*38fd1498Szrj 
/* Init deps data vector.  Ensure h_d_i_d can be indexed by any luid up
   to sched_max_luid; grow (zero-initialized) to 1.5 * sched_max_luid
   only when the current capacity cannot hold the required elements.  */
static void
init_deps_data_vector (void)
{
  int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
  if (reserve > 0 && ! h_d_i_d.space (reserve))
    h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
}
4071*38fd1498Szrj 
/* If it is profitable to use them, initialize or extend (depending on
   GLOBAL_P) dependency data.  */
void
sched_deps_init (bool global_p)
{
  /* Average number of insns in the basic block.
     '+ 1' is used to make it nonzero.  */
  int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;

  init_deps_data_vector ();

  /* We use another caching mechanism for selective scheduling, so
     we don't use this one.  */
  if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
    {
      /* ?!? We could save some memory by computing a per-region luid mapping
         which could reduce both the number of vectors in the cache and the
         size of each vector.  Instead we just avoid the cache entirely unless
         the average number of instructions in a basic block is very high.  See
         the comment before the declaration of true_dependency_cache for
         what we consider "very high".  */
      cache_size = 0;
      extend_dependency_caches (sched_max_luid, true);
    }

  /* The allocation pools for dep lists/nodes are per-function state and
     are only (re)created on the global initialization pass.  */
  if (global_p)
    {
      dl_pool = new object_allocator<_deps_list> ("deps_list");
				/* Allocate lists for one block at a time.  */
      dn_pool = new object_allocator<_dep_node> ("dep_node");
				/* Allocate nodes for one block at a time.  */
    }
}
4105*38fd1498Szrj 
4106*38fd1498Szrj 
/* Create or extend (depending on CREATE_P) dependency caches to
   size N.  The caches are arrays of bitmap heads indexed by insn luid.  */
void
extend_dependency_caches (int n, bool create_p)
{
  /* Only (re)allocate when creating the caches or when they already
     exist; otherwise caching is disabled and this is a no-op.  */
  if (create_p || true_dependency_cache)
    {
      int i, luid = cache_size + n;

      true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
					  luid);
      output_dependency_cache = XRESIZEVEC (bitmap_head,
					    output_dependency_cache, luid);
      anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
					  luid);
      control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
					  luid);

      /* The speculation cache only exists when the current pass generates
	 speculative dependencies.  */
      if (current_sched_info->flags & DO_SPECULATION)
        spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
					    luid);

      /* Initialize only the newly added bitmap heads; the first
	 cache_size entries are already live.  */
      for (i = cache_size; i < luid; i++)
	{
	  bitmap_initialize (&true_dependency_cache[i], 0);
	  bitmap_initialize (&output_dependency_cache[i], 0);
	  bitmap_initialize (&anti_dependency_cache[i], 0);
	  bitmap_initialize (&control_dependency_cache[i], 0);

          if (current_sched_info->flags & DO_SPECULATION)
            bitmap_initialize (&spec_dependency_cache[i], 0);
	}
      cache_size = luid;
    }
}
4142*38fd1498Szrj 
4143*38fd1498Szrj /* Finalize dependency information for the whole function.  */
4144*38fd1498Szrj void
sched_deps_finish(void)4145*38fd1498Szrj sched_deps_finish (void)
4146*38fd1498Szrj {
4147*38fd1498Szrj   gcc_assert (deps_pools_are_empty_p ());
4148*38fd1498Szrj   delete dn_pool;
4149*38fd1498Szrj   delete dl_pool;
4150*38fd1498Szrj   dn_pool = NULL;
4151*38fd1498Szrj   dl_pool = NULL;
4152*38fd1498Szrj 
4153*38fd1498Szrj   h_d_i_d.release ();
4154*38fd1498Szrj   cache_size = 0;
4155*38fd1498Szrj 
4156*38fd1498Szrj   if (true_dependency_cache)
4157*38fd1498Szrj     {
4158*38fd1498Szrj       int i;
4159*38fd1498Szrj 
4160*38fd1498Szrj       for (i = 0; i < cache_size; i++)
4161*38fd1498Szrj 	{
4162*38fd1498Szrj 	  bitmap_clear (&true_dependency_cache[i]);
4163*38fd1498Szrj 	  bitmap_clear (&output_dependency_cache[i]);
4164*38fd1498Szrj 	  bitmap_clear (&anti_dependency_cache[i]);
4165*38fd1498Szrj 	  bitmap_clear (&control_dependency_cache[i]);
4166*38fd1498Szrj 
4167*38fd1498Szrj           if (sched_deps_info->generate_spec_deps)
4168*38fd1498Szrj             bitmap_clear (&spec_dependency_cache[i]);
4169*38fd1498Szrj 	}
4170*38fd1498Szrj       free (true_dependency_cache);
4171*38fd1498Szrj       true_dependency_cache = NULL;
4172*38fd1498Szrj       free (output_dependency_cache);
4173*38fd1498Szrj       output_dependency_cache = NULL;
4174*38fd1498Szrj       free (anti_dependency_cache);
4175*38fd1498Szrj       anti_dependency_cache = NULL;
4176*38fd1498Szrj       free (control_dependency_cache);
4177*38fd1498Szrj       control_dependency_cache = NULL;
4178*38fd1498Szrj 
4179*38fd1498Szrj       if (sched_deps_info->generate_spec_deps)
4180*38fd1498Szrj         {
4181*38fd1498Szrj           free (spec_dependency_cache);
4182*38fd1498Szrj           spec_dependency_cache = NULL;
4183*38fd1498Szrj         }
4184*38fd1498Szrj 
4185*38fd1498Szrj     }
4186*38fd1498Szrj }
4187*38fd1498Szrj 
/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global (void)
{
  /* Start with no implicit clobbers/uses and no pending register
     accesses or barrier.  */
  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
  reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
  reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
  reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_barrier = NOT_A_BARRIER;

  /* Install the haifa scheduler callbacks unless the selective
     scheduler is active (and not emulating haifa).  */
  if (!sel_sched_p () || sched_emulate_haifa_p)
    {
      sched_deps_info->start_insn = haifa_start_insn;
      sched_deps_info->finish_insn = haifa_finish_insn;

      sched_deps_info->note_reg_set = haifa_note_reg_set;
      sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
      sched_deps_info->note_reg_use = haifa_note_reg_use;

      sched_deps_info->note_mem_dep = haifa_note_mem_dep;
      sched_deps_info->note_dep = haifa_note_dep;
   }
}
4215*38fd1498Szrj 
4216*38fd1498Szrj /* Free everything used by the dependency analysis code.  */
4217*38fd1498Szrj 
4218*38fd1498Szrj void
finish_deps_global(void)4219*38fd1498Szrj finish_deps_global (void)
4220*38fd1498Szrj {
4221*38fd1498Szrj   FREE_REG_SET (reg_pending_sets);
4222*38fd1498Szrj   FREE_REG_SET (reg_pending_clobbers);
4223*38fd1498Szrj   FREE_REG_SET (reg_pending_uses);
4224*38fd1498Szrj   FREE_REG_SET (reg_pending_control_uses);
4225*38fd1498Szrj }
4226*38fd1498Szrj 
4227*38fd1498Szrj /* Estimate the weakness of dependence between MEM1 and MEM2.  */
4228*38fd1498Szrj dw_t
estimate_dep_weak(rtx mem1,rtx mem2)4229*38fd1498Szrj estimate_dep_weak (rtx mem1, rtx mem2)
4230*38fd1498Szrj {
4231*38fd1498Szrj   if (mem1 == mem2)
4232*38fd1498Szrj     /* MEMs are the same - don't speculate.  */
4233*38fd1498Szrj     return MIN_DEP_WEAK;
4234*38fd1498Szrj 
4235*38fd1498Szrj   rtx r1 = XEXP (mem1, 0);
4236*38fd1498Szrj   rtx r2 = XEXP (mem2, 0);
4237*38fd1498Szrj 
4238*38fd1498Szrj   if (sched_deps_info->use_cselib)
4239*38fd1498Szrj     {
4240*38fd1498Szrj       /* We cannot call rtx_equal_for_cselib_p because the VALUEs might be
4241*38fd1498Szrj 	 dangling at this point, since we never preserve them.  Instead we
4242*38fd1498Szrj 	 canonicalize manually to get stable VALUEs out of hashing.  */
4243*38fd1498Szrj       if (GET_CODE (r1) == VALUE && CSELIB_VAL_PTR (r1))
4244*38fd1498Szrj 	r1 = canonical_cselib_val (CSELIB_VAL_PTR (r1))->val_rtx;
4245*38fd1498Szrj       if (GET_CODE (r2) == VALUE && CSELIB_VAL_PTR (r2))
4246*38fd1498Szrj 	r2 = canonical_cselib_val (CSELIB_VAL_PTR (r2))->val_rtx;
4247*38fd1498Szrj     }
4248*38fd1498Szrj 
4249*38fd1498Szrj   if (r1 == r2
4250*38fd1498Szrj       || (REG_P (r1) && REG_P (r2) && REGNO (r1) == REGNO (r2)))
4251*38fd1498Szrj     /* Again, MEMs are the same.  */
4252*38fd1498Szrj     return MIN_DEP_WEAK;
4253*38fd1498Szrj   else if ((REG_P (r1) && !REG_P (r2)) || (!REG_P (r1) && REG_P (r2)))
4254*38fd1498Szrj     /* Different addressing modes - reason to be more speculative,
4255*38fd1498Szrj        than usual.  */
4256*38fd1498Szrj     return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4257*38fd1498Szrj   else
4258*38fd1498Szrj     /* We can't say anything about the dependence.  */
4259*38fd1498Szrj     return UNCERTAIN_DEP_WEAK;
4260*38fd1498Szrj }
4261*38fd1498Szrj 
4262*38fd1498Szrj /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4263*38fd1498Szrj    This function can handle same INSN and ELEM (INSN == ELEM).
4264*38fd1498Szrj    It is a convenience wrapper.  */
4265*38fd1498Szrj static void
add_dependence_1(rtx_insn * insn,rtx_insn * elem,enum reg_note dep_type)4266*38fd1498Szrj add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
4267*38fd1498Szrj {
4268*38fd1498Szrj   ds_t ds;
4269*38fd1498Szrj   bool internal;
4270*38fd1498Szrj 
4271*38fd1498Szrj   if (dep_type == REG_DEP_TRUE)
4272*38fd1498Szrj     ds = DEP_TRUE;
4273*38fd1498Szrj   else if (dep_type == REG_DEP_OUTPUT)
4274*38fd1498Szrj     ds = DEP_OUTPUT;
4275*38fd1498Szrj   else if (dep_type == REG_DEP_CONTROL)
4276*38fd1498Szrj     ds = DEP_CONTROL;
4277*38fd1498Szrj   else
4278*38fd1498Szrj     {
4279*38fd1498Szrj       gcc_assert (dep_type == REG_DEP_ANTI);
4280*38fd1498Szrj       ds = DEP_ANTI;
4281*38fd1498Szrj     }
4282*38fd1498Szrj 
4283*38fd1498Szrj   /* When add_dependence is called from inside sched-deps.c, we expect
4284*38fd1498Szrj      cur_insn to be non-null.  */
4285*38fd1498Szrj   internal = cur_insn != NULL;
4286*38fd1498Szrj   if (internal)
4287*38fd1498Szrj     gcc_assert (insn == cur_insn);
4288*38fd1498Szrj   else
4289*38fd1498Szrj     cur_insn = insn;
4290*38fd1498Szrj 
4291*38fd1498Szrj   note_dep (elem, ds);
4292*38fd1498Szrj   if (!internal)
4293*38fd1498Szrj     cur_insn = NULL;
4294*38fd1498Szrj }
4295*38fd1498Szrj 
4296*38fd1498Szrj /* Return weakness of speculative type TYPE in the dep_status DS,
4297*38fd1498Szrj    without checking to prevent ICEs on malformed input.  */
4298*38fd1498Szrj static dw_t
get_dep_weak_1(ds_t ds,ds_t type)4299*38fd1498Szrj get_dep_weak_1 (ds_t ds, ds_t type)
4300*38fd1498Szrj {
4301*38fd1498Szrj   ds = ds & type;
4302*38fd1498Szrj 
4303*38fd1498Szrj   switch (type)
4304*38fd1498Szrj     {
4305*38fd1498Szrj     case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4306*38fd1498Szrj     case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4307*38fd1498Szrj     case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4308*38fd1498Szrj     case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4309*38fd1498Szrj     default: gcc_unreachable ();
4310*38fd1498Szrj     }
4311*38fd1498Szrj 
4312*38fd1498Szrj   return (dw_t) ds;
4313*38fd1498Szrj }
4314*38fd1498Szrj 
4315*38fd1498Szrj /* Return weakness of speculative type TYPE in the dep_status DS.  */
4316*38fd1498Szrj dw_t
get_dep_weak(ds_t ds,ds_t type)4317*38fd1498Szrj get_dep_weak (ds_t ds, ds_t type)
4318*38fd1498Szrj {
4319*38fd1498Szrj   dw_t dw = get_dep_weak_1 (ds, type);
4320*38fd1498Szrj 
4321*38fd1498Szrj   gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4322*38fd1498Szrj   return dw;
4323*38fd1498Szrj }
4324*38fd1498Szrj 
4325*38fd1498Szrj /* Return the dep_status, which has the same parameters as DS, except for
4326*38fd1498Szrj    speculative type TYPE, that will have weakness DW.  */
4327*38fd1498Szrj ds_t
set_dep_weak(ds_t ds,ds_t type,dw_t dw)4328*38fd1498Szrj set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4329*38fd1498Szrj {
4330*38fd1498Szrj   gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4331*38fd1498Szrj 
4332*38fd1498Szrj   ds &= ~type;
4333*38fd1498Szrj   switch (type)
4334*38fd1498Szrj     {
4335*38fd1498Szrj     case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4336*38fd1498Szrj     case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4337*38fd1498Szrj     case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4338*38fd1498Szrj     case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4339*38fd1498Szrj     default: gcc_unreachable ();
4340*38fd1498Szrj     }
4341*38fd1498Szrj   return ds;
4342*38fd1498Szrj }
4343*38fd1498Szrj 
/* Return the join of two dep_statuses DS1 and DS2.
   If MAX_P is true then choose the greater probability,
   otherwise multiply probabilities.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
static ds_t
ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
{
  ds_t ds, t;

  gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));

  /* Start from the union of the plain dependence-type bits.  */
  ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);

  /* Walk every speculation type from FIRST_SPEC_TYPE to LAST_SPEC_TYPE.  */
  t = FIRST_SPEC_TYPE;
  do
    {
      /* A type present on only one side is copied over verbatim.  */
      if ((ds1 & t) && !(ds2 & t))
	ds |= ds1 & t;
      else if (!(ds1 & t) && (ds2 & t))
	ds |= ds2 & t;
      else if ((ds1 & t) && (ds2 & t))
	{
	  dw_t dw1 = get_dep_weak (ds1, t);
	  dw_t dw2 = get_dep_weak (ds2, t);
	  ds_t dw;

	  if (!max_p)
	    {
	      /* Multiply the two probabilities.  Both are scaled by
		 MAX_DEP_WEAK, so divide one scale factor back out, then
		 clamp to the representable minimum.  */
	      dw = ((ds_t) dw1) * ((ds_t) dw2);
	      dw /= MAX_DEP_WEAK;
	      if (dw < MIN_DEP_WEAK)
		dw = MIN_DEP_WEAK;
	    }
	  else
	    {
	      /* Keep the more optimistic (greater) of the two.  */
	      if (dw1 >= dw2)
		dw = dw1;
	      else
		dw = dw2;
	    }

	  ds = set_dep_weak (ds, t, (dw_t) dw);
	}

      if (t == LAST_SPEC_TYPE)
	break;
      t <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  return ds;
}
4396*38fd1498Szrj 
4397*38fd1498Szrj /* Return the join of two dep_statuses DS1 and DS2.
4398*38fd1498Szrj    This function assumes that both DS1 and DS2 contain speculative bits.  */
4399*38fd1498Szrj ds_t
ds_merge(ds_t ds1,ds_t ds2)4400*38fd1498Szrj ds_merge (ds_t ds1, ds_t ds2)
4401*38fd1498Szrj {
4402*38fd1498Szrj   return ds_merge_1 (ds1, ds2, false);
4403*38fd1498Szrj }
4404*38fd1498Szrj 
/* Return the join of two dep_statuses DS and DS2.  If both are
   speculative and MEM1 is non-null, the data-speculation weakness of DS
   is first re-estimated from the MEM1/MEM2 pair.  */
ds_t
ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
{
  ds_t new_status = ds | ds2;

  if (new_status & SPECULATIVE)
    {
      /* A nonzero status without any speculative bits marks a hard
	 dependence; joining it with anything makes the result
	 non-speculative too.  */
      if ((ds && !(ds & SPECULATIVE))
	  || (ds2 && !(ds2 & SPECULATIVE)))
	/* Then this dep can't be speculative.  */
	new_status &= ~SPECULATIVE;
      else
	{
	  /* Both are speculative.  Merging probabilities.  */
	  if (mem1)
	    {
	      dw_t dw;

	      dw = estimate_dep_weak (mem1, mem2);
	      ds = set_dep_weak (ds, BEGIN_DATA, dw);
	    }

	  /* An empty status contributes nothing; otherwise join the
	     two speculative statuses properly.  */
	  if (!ds)
	    new_status = ds2;
	  else if (!ds2)
	    new_status = ds;
	  else
	    new_status = ds_merge (ds2, ds);
	}
    }

  return new_status;
}
4439*38fd1498Szrj 
4440*38fd1498Szrj /* Return the join of DS1 and DS2.  Use maximum instead of multiplying
4441*38fd1498Szrj    probabilities.  */
4442*38fd1498Szrj ds_t
ds_max_merge(ds_t ds1,ds_t ds2)4443*38fd1498Szrj ds_max_merge (ds_t ds1, ds_t ds2)
4444*38fd1498Szrj {
4445*38fd1498Szrj   if (ds1 == 0 && ds2 == 0)
4446*38fd1498Szrj     return 0;
4447*38fd1498Szrj 
4448*38fd1498Szrj   if (ds1 == 0 && ds2 != 0)
4449*38fd1498Szrj     return ds2;
4450*38fd1498Szrj 
4451*38fd1498Szrj   if (ds1 != 0 && ds2 == 0)
4452*38fd1498Szrj     return ds1;
4453*38fd1498Szrj 
4454*38fd1498Szrj   return ds_merge_1 (ds1, ds2, true);
4455*38fd1498Szrj }
4456*38fd1498Szrj 
4457*38fd1498Szrj /* Return the probability of speculation success for the speculation
4458*38fd1498Szrj    status DS.  */
4459*38fd1498Szrj dw_t
ds_weak(ds_t ds)4460*38fd1498Szrj ds_weak (ds_t ds)
4461*38fd1498Szrj {
4462*38fd1498Szrj   ds_t res = 1, dt;
4463*38fd1498Szrj   int n = 0;
4464*38fd1498Szrj 
4465*38fd1498Szrj   dt = FIRST_SPEC_TYPE;
4466*38fd1498Szrj   do
4467*38fd1498Szrj     {
4468*38fd1498Szrj       if (ds & dt)
4469*38fd1498Szrj 	{
4470*38fd1498Szrj 	  res *= (ds_t) get_dep_weak (ds, dt);
4471*38fd1498Szrj 	  n++;
4472*38fd1498Szrj 	}
4473*38fd1498Szrj 
4474*38fd1498Szrj       if (dt == LAST_SPEC_TYPE)
4475*38fd1498Szrj 	break;
4476*38fd1498Szrj       dt <<= SPEC_TYPE_SHIFT;
4477*38fd1498Szrj     }
4478*38fd1498Szrj   while (1);
4479*38fd1498Szrj 
4480*38fd1498Szrj   gcc_assert (n);
4481*38fd1498Szrj   while (--n)
4482*38fd1498Szrj     res /= MAX_DEP_WEAK;
4483*38fd1498Szrj 
4484*38fd1498Szrj   if (res < MIN_DEP_WEAK)
4485*38fd1498Szrj     res = MIN_DEP_WEAK;
4486*38fd1498Szrj 
4487*38fd1498Szrj   gcc_assert (res <= MAX_DEP_WEAK);
4488*38fd1498Szrj 
4489*38fd1498Szrj   return (dw_t) res;
4490*38fd1498Szrj }
4491*38fd1498Szrj 
/* Return a dep status that contains all speculation types of DS.  */
ds_t
ds_get_speculation_types (ds_t ds)
{
  /* Each of BEGIN_DATA, BE_IN_DATA, BEGIN_CONTROL and BE_IN_CONTROL is
     the full bit-field mask for one speculation type.  If any weakness
     bit within a field is set, widen it to the complete mask, so the
     result records only which types are present, not their weaknesses.  */
  if (ds & BEGIN_DATA)
    ds |= BEGIN_DATA;
  if (ds & BE_IN_DATA)
    ds |= BE_IN_DATA;
  if (ds & BEGIN_CONTROL)
    ds |= BEGIN_CONTROL;
  if (ds & BE_IN_CONTROL)
    ds |= BE_IN_CONTROL;

  /* Strip everything that is not a speculation bit.  */
  return ds & SPECULATIVE;
}
4507*38fd1498Szrj 
4508*38fd1498Szrj /* Return a dep status that contains maximal weakness for each speculation
4509*38fd1498Szrj    type present in DS.  */
4510*38fd1498Szrj ds_t
ds_get_max_dep_weak(ds_t ds)4511*38fd1498Szrj ds_get_max_dep_weak (ds_t ds)
4512*38fd1498Szrj {
4513*38fd1498Szrj   if (ds & BEGIN_DATA)
4514*38fd1498Szrj     ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4515*38fd1498Szrj   if (ds & BE_IN_DATA)
4516*38fd1498Szrj     ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4517*38fd1498Szrj   if (ds & BEGIN_CONTROL)
4518*38fd1498Szrj     ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4519*38fd1498Szrj   if (ds & BE_IN_CONTROL)
4520*38fd1498Szrj     ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4521*38fd1498Szrj 
4522*38fd1498Szrj   return ds;
4523*38fd1498Szrj }
4524*38fd1498Szrj 
4525*38fd1498Szrj /* Dump information about the dependence status S.  */
4526*38fd1498Szrj static void
dump_ds(FILE * f,ds_t s)4527*38fd1498Szrj dump_ds (FILE *f, ds_t s)
4528*38fd1498Szrj {
4529*38fd1498Szrj   fprintf (f, "{");
4530*38fd1498Szrj 
4531*38fd1498Szrj   if (s & BEGIN_DATA)
4532*38fd1498Szrj     fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4533*38fd1498Szrj   if (s & BE_IN_DATA)
4534*38fd1498Szrj     fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4535*38fd1498Szrj   if (s & BEGIN_CONTROL)
4536*38fd1498Szrj     fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4537*38fd1498Szrj   if (s & BE_IN_CONTROL)
4538*38fd1498Szrj     fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4539*38fd1498Szrj 
4540*38fd1498Szrj   if (s & HARD_DEP)
4541*38fd1498Szrj     fprintf (f, "HARD_DEP; ");
4542*38fd1498Szrj 
4543*38fd1498Szrj   if (s & DEP_TRUE)
4544*38fd1498Szrj     fprintf (f, "DEP_TRUE; ");
4545*38fd1498Szrj   if (s & DEP_OUTPUT)
4546*38fd1498Szrj     fprintf (f, "DEP_OUTPUT; ");
4547*38fd1498Szrj   if (s & DEP_ANTI)
4548*38fd1498Szrj     fprintf (f, "DEP_ANTI; ");
4549*38fd1498Szrj   if (s & DEP_CONTROL)
4550*38fd1498Szrj     fprintf (f, "DEP_CONTROL; ");
4551*38fd1498Szrj 
4552*38fd1498Szrj   fprintf (f, "}");
4553*38fd1498Szrj }
4554*38fd1498Szrj 
4555*38fd1498Szrj DEBUG_FUNCTION void
debug_ds(ds_t s)4556*38fd1498Szrj debug_ds (ds_t s)
4557*38fd1498Szrj {
4558*38fd1498Szrj   dump_ds (stderr, s);
4559*38fd1498Szrj   fprintf (stderr, "\n");
4560*38fd1498Szrj }
4561*38fd1498Szrj 
/* Verify that dependence type and status are consistent.
   If RELAXED_P is true, then skip dep_weakness checks.  */
static void
check_dep (dep_t dep, bool relaxed_p)
{
  enum reg_note dt = DEP_TYPE (dep);
  ds_t ds = DEP_STATUS (dep);

  /* Self-dependencies are never allowed.  */
  gcc_assert (DEP_PRO (dep) != DEP_CON (dep));

  /* Without USE_DEPS_LIST the status field is not maintained and must
     stay zero.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      gcc_assert (ds == 0);
      return;
    }

  /* Check that dependence type contains the same bits as the status.  */
  if (dt == REG_DEP_TRUE)
    gcc_assert (ds & DEP_TRUE);
  else if (dt == REG_DEP_OUTPUT)
    gcc_assert ((ds & DEP_OUTPUT)
		&& !(ds & DEP_TRUE));
  else if (dt == REG_DEP_ANTI)
    gcc_assert ((ds & DEP_ANTI)
		&& !(ds & (DEP_OUTPUT | DEP_TRUE)));
  else
    gcc_assert (dt == REG_DEP_CONTROL
		&& (ds & DEP_CONTROL)
		&& !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));

  /* HARD_DEP can not appear in dep_status of a link.  */
  gcc_assert (!(ds & HARD_DEP));

  /* Check that dependence status is set correctly when speculation is not
     supported.  */
  if (!sched_deps_info->generate_spec_deps)
    gcc_assert (!(ds & SPECULATIVE));
  else if (ds & SPECULATIVE)
    {
      if (!relaxed_p)
	{
	  ds_t type = FIRST_SPEC_TYPE;

	  /* Check that dependence weakness is in proper range; the
	     assertion inside get_dep_weak fires if it is not.  */
	  do
	    {
	      if (ds & type)
		get_dep_weak (ds, type);

	      if (type == LAST_SPEC_TYPE)
		break;
	      type <<= SPEC_TYPE_SHIFT;
	    }
	  while (1);
	}

      if (ds & BEGIN_SPEC)
	{
	  /* Only true dependence can be data speculative.  */
	  if (ds & BEGIN_DATA)
	    gcc_assert (ds & DEP_TRUE);

	  /* Control dependencies in the insn scheduler are represented by
	     anti-dependencies, therefore only anti dependence can be
	     control speculative.  */
	  if (ds & BEGIN_CONTROL)
	    gcc_assert (ds & DEP_ANTI);
	}
      else
	{
	  /* Subsequent speculations should resolve true dependencies.  */
	  gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
	}

      /* Check that true and anti dependencies can't have other speculative
	 statuses.  */
      if (ds & DEP_TRUE)
	gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
      /* An output dependence can't be speculative at all.  */
      gcc_assert (!(ds & DEP_OUTPUT));
      if (ds & DEP_ANTI)
	gcc_assert (ds & BEGIN_CONTROL);
    }
}
4646*38fd1498Szrj 
4647*38fd1498Szrj /* The following code discovers opportunities to switch a memory reference
4648*38fd1498Szrj    and an increment by modifying the address.  We ensure that this is done
4649*38fd1498Szrj    only for dependencies that are only used to show a single register
4650*38fd1498Szrj    dependence (using DEP_NONREG and DEP_MULTIPLE), and so that every memory
4651*38fd1498Szrj    instruction involved is subject to only one dep that can cause a pattern
4652*38fd1498Szrj    change.
4653*38fd1498Szrj 
4654*38fd1498Szrj    When we discover a suitable dependency, we fill in the dep_replacement
4655*38fd1498Szrj    structure to show how to modify the memory reference.  */
4656*38fd1498Szrj 
/* Holds information about a pair of memory reference and register increment
   insns which depend on each other, but could possibly be interchanged.  */
struct mem_inc_info
{
  /* The insn performing the register increment.  */
  rtx_insn *inc_insn;
  /* The insn containing the memory reference.  */
  rtx_insn *mem_insn;

  /* Location of the MEM rtx within mem_insn's pattern, suitable for
     passing to validate_change.  */
  rtx *mem_loc;
  /* A register occurring in the memory address for which we wish to break
     the dependence.  This must be identical to the destination register of
     the increment.  */
  rtx mem_reg0;
  /* Any kind of index that is added to that register.  */
  rtx mem_index;
  /* The constant offset used in the memory address.  */
  HOST_WIDE_INT mem_constant;
  /* The constant added in the increment insn.  Negated if the increment is
     after the memory address.  */
  HOST_WIDE_INT inc_constant;
  /* The source register used in the increment.  May be different from mem_reg0
     if the increment occurs before the memory address.  */
  rtx inc_input;
};
4680*38fd1498Szrj 
4681*38fd1498Szrj /* Verify that the memory location described in MII can be replaced with
4682*38fd1498Szrj    one using NEW_ADDR.  Return the new memory reference or NULL_RTX.  The
4683*38fd1498Szrj    insn remains unchanged by this function.  */
4684*38fd1498Szrj 
4685*38fd1498Szrj static rtx
attempt_change(struct mem_inc_info * mii,rtx new_addr)4686*38fd1498Szrj attempt_change (struct mem_inc_info *mii, rtx new_addr)
4687*38fd1498Szrj {
4688*38fd1498Szrj   rtx mem = *mii->mem_loc;
4689*38fd1498Szrj   rtx new_mem;
4690*38fd1498Szrj 
4691*38fd1498Szrj   /* Jump through a lot of hoops to keep the attributes up to date.  We
4692*38fd1498Szrj      do not want to call one of the change address variants that take
4693*38fd1498Szrj      an offset even though we know the offset in many cases.  These
4694*38fd1498Szrj      assume you are changing where the address is pointing by the
4695*38fd1498Szrj      offset.  */
4696*38fd1498Szrj   new_mem = replace_equiv_address_nv (mem, new_addr);
4697*38fd1498Szrj   if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
4698*38fd1498Szrj     {
4699*38fd1498Szrj       if (sched_verbose >= 5)
4700*38fd1498Szrj 	fprintf (sched_dump, "validation failure\n");
4701*38fd1498Szrj       return NULL_RTX;
4702*38fd1498Szrj     }
4703*38fd1498Szrj 
4704*38fd1498Szrj   /* Put back the old one.  */
4705*38fd1498Szrj   validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
4706*38fd1498Szrj 
4707*38fd1498Szrj   return new_mem;
4708*38fd1498Szrj }
4709*38fd1498Szrj 
/* Return true if INSN is of a form "a = b op c" where a and b are
   regs.  op is + if c is a reg and +|- if c is a const.  Fill in
   information in MII about what is found.
   BEFORE_MEM indicates whether the increment is found before or after
   a corresponding memory reference.
   NOTE(review): as written, the code below only accepts a CONST_INT
   addend "c"; the "c is a reg" case mentioned above is rejected.  */

static bool
parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
{
  rtx pat = single_set (insn);
  rtx src, cst;
  bool regs_equal;

  /* Frame-related insns must keep their exact form, and anything that is
     not a single SET cannot be the simple add/inc we are looking for.  */
  if (RTX_FRAME_RELATED_P (insn) || !pat)
    return false;

  /* Do not allow breaking data dependencies for insns that are marked
     with REG_STACK_CHECK.  */
  if (find_reg_note (insn, REG_STACK_CHECK, NULL))
    return false;

  /* Result must be single reg.  */
  if (!REG_P (SET_DEST (pat)))
    return false;

  if (GET_CODE (SET_SRC (pat)) != PLUS)
    return false;

  mii->inc_insn = insn;
  src = SET_SRC (pat);
  mii->inc_input = XEXP (src, 0);

  /* The first operand of the PLUS (the "b" above) must be a register.  */
  if (!REG_P (XEXP (src, 0)))
    return false;

  /* The destination must be the register used in the memory address.  */
  if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
    return false;

  /* Only constant increments are handled.  */
  cst = XEXP (src, 1);
  if (!CONST_INT_P (cst))
    return false;
  mii->inc_constant = INTVAL (cst);

  regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);

  if (!before_mem)
    {
      /* The increment executes after the memory reference, so undoing
	 it in the address means subtracting the constant; and in that
	 case the increment must read the same register it sets, or the
	 old address could not be reconstructed from INC_INPUT.  */
      mii->inc_constant = -mii->inc_constant;
      if (!regs_equal)
	return false;
    }

  if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
    {
      /* Only accept stack-pointer adjustments that shrink the stack,
	 i.e. release rather than allocate space.
	 Note that the sign has already been reversed for !before_mem.  */
      if (STACK_GROWS_DOWNWARD)
	return mii->inc_constant > 0;
      else
	return mii->inc_constant < 0;
    }
  return true;
}
4772*38fd1498Szrj 
/* Once a suitable mem reference has been found and the corresponding data
   in MII has been filled in, this function is called to find a suitable
   add or inc insn involving the register we found in the memory
   reference.  If BACKWARDS is true, scan producers of the mem insn's
   hard back dependencies; otherwise scan its forward-dependence
   consumers.  Return true if a breakable dependence was recorded.  */

static bool
find_inc (struct mem_inc_info *mii, bool backwards)
{
  sd_iterator_def sd_it;
  dep_t dep;

  sd_it = sd_iterator_start (mii->mem_insn,
			     backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
  while (sd_iterator_cond (&sd_it, &dep))
    {
      dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
      rtx_insn *pro = DEP_PRO (dep);
      rtx_insn *con = DEP_CON (dep);
      /* The increment candidate sits at the other end of the dep.  */
      rtx_insn *inc_cand = backwards ? pro : con;
      /* Only simple single-register dependencies can be broken.  */
      if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
	goto next;
      if (parse_add_or_inc (mii, inc_cand, backwards))
	{
	  struct dep_replacement *desc;
	  df_ref def;
	  rtx newaddr, newmem;

	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
		     INSN_UID (mii->mem_insn), INSN_UID (inc_cand));

	  /* Need to assure that none of the operands of the inc
	     instruction are assigned to by the mem insn.  */
	  FOR_EACH_INSN_DEF (def, mii->mem_insn)
	    if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
		|| reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
	      {
		if (sched_verbose >= 5)
		  fprintf (sched_dump,
			   "inc conflicts with store failure.\n");
		goto next;
	      }

	  /* Build the address the mem would use on the other side of the
	     increment: inc_input [+ index] + (mem_constant + inc_constant).
	     (inc_constant was negated by parse_add_or_inc when the
	     increment follows the memory reference.)  */
	  newaddr = mii->inc_input;
	  if (mii->mem_index != NULL_RTX)
	    newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
				    mii->mem_index);
	  newaddr = plus_constant (GET_MODE (newaddr), newaddr,
				   mii->mem_constant + mii->inc_constant);
	  newmem = attempt_change (mii, newaddr);
	  if (newmem == NULL_RTX)
	    goto next;
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "successful address replacement\n");
	  /* Attach a replacement descriptor to the dependence so the
	     scheduler can perform the substitution if it chooses to
	     break this dep.  */
	  desc = XCNEW (struct dep_replacement);
	  DEP_REPLACE (dep) = desc;
	  desc->loc = mii->mem_loc;
	  desc->newval = newmem;
	  desc->orig = *desc->loc;
	  desc->insn = mii->mem_insn;
	  /* The dep is now breakable: demote it from the consumer's hard
	     back-deps list to its speculative back-deps list.  */
	  move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
			 INSN_SPEC_BACK_DEPS (con));
	  if (backwards)
	    {
	      /* The mem insn may now be scheduled before the increment;
		 keep it ordered after everything the increment itself
		 depends on.  */
	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
		add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
				  REG_DEP_TRUE);
	    }
	  else
	    {
	      /* The mem insn may now be scheduled after the increment;
		 keep every consumer of the increment ordered after the
		 mem insn as well.  */
	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
		add_dependence_1 (DEP_CON (dep), mii->mem_insn,
				  REG_DEP_ANTI);
	    }
	  return true;
	}
    next:
      sd_iterator_next (&sd_it);
    }
  return false;
}
4854*38fd1498Szrj 
/* A recursive function that walks ADDRESS_OF_X to find memory references
   which could be modified during scheduling.  We call find_inc for each
   one we find that has a recognizable form.  MII holds information about
   the pair of memory/increment instructions.
   We ensure that every instruction with a memory reference (which will be
   the location of the replacement) is assigned at most one breakable
   dependency.  */

static bool
find_mem (struct mem_inc_info *mii, rtx *address_of_x)
{
  rtx x = *address_of_x;
  enum rtx_code code = GET_CODE (x);
  const char *const fmt = GET_RTX_FORMAT (code);
  int i;

  if (code == MEM)
    {
      rtx reg0 = XEXP (x, 0);

      /* Decompose the address into reg0 [+ index] [+ constant],
	 recording the pieces in MII as we peel them off.  */
      mii->mem_loc = address_of_x;
      mii->mem_index = NULL_RTX;
      mii->mem_constant = 0;
      if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
	{
	  mii->mem_constant = INTVAL (XEXP (reg0, 1));
	  reg0 = XEXP (reg0, 0);
	}
      if (GET_CODE (reg0) == PLUS)
	{
	  mii->mem_index = XEXP (reg0, 1);
	  reg0 = XEXP (reg0, 0);
	}
      if (REG_P (reg0))
	{
	  df_ref use;
	  int occurrences = 0;

	  /* Make sure this reg appears only once in this insn.  Can't use
	     count_occurrences since that only works for pseudos.  */
	  FOR_EACH_INSN_USE (use, mii->mem_insn)
	    if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
	      if (++occurrences > 1)
		{
		  if (sched_verbose >= 5)
		    fprintf (sched_dump, "mem count failure\n");
		  return false;
		}

	  /* Try to break a backward dependence first, then forward.  */
	  mii->mem_reg0 = reg0;
	  return find_inc (mii, true) || find_inc (mii, false);
	}
      return false;
    }

  if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
    {
      /* If REG occurs inside a MEM used in a bit-field reference,
	 that is unacceptable.  */
      return false;
    }

  /* Time for some deep diving.  Recurse into every rtx ('e') and rtx
     vector ('E') operand, in reverse operand order, stopping at the
     first MEM that yields a breakable dependence.  */
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (find_mem (mii, &XEXP (x, i)))
	    return true;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (find_mem (mii, &XVECEXP (x, i, j)))
	      return true;
	}
    }
  return false;
}
4935*38fd1498Szrj 
4936*38fd1498Szrj 
4937*38fd1498Szrj /* Examine the instructions between HEAD and TAIL and try to find
4938*38fd1498Szrj    dependencies that can be broken by modifying one of the patterns.  */
4939*38fd1498Szrj 
4940*38fd1498Szrj void
find_modifiable_mems(rtx_insn * head,rtx_insn * tail)4941*38fd1498Szrj find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
4942*38fd1498Szrj {
4943*38fd1498Szrj   rtx_insn *insn, *next_tail = NEXT_INSN (tail);
4944*38fd1498Szrj   int success_in_block = 0;
4945*38fd1498Szrj 
4946*38fd1498Szrj   for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
4947*38fd1498Szrj     {
4948*38fd1498Szrj       struct mem_inc_info mii;
4949*38fd1498Szrj 
4950*38fd1498Szrj       if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
4951*38fd1498Szrj 	continue;
4952*38fd1498Szrj 
4953*38fd1498Szrj       mii.mem_insn = insn;
4954*38fd1498Szrj       if (find_mem (&mii, &PATTERN (insn)))
4955*38fd1498Szrj 	success_in_block++;
4956*38fd1498Szrj     }
4957*38fd1498Szrj   if (success_in_block && sched_verbose >= 5)
4958*38fd1498Szrj     fprintf (sched_dump, "%d candidates for address modification found.\n",
4959*38fd1498Szrj 	     success_in_block);
4960*38fd1498Szrj }
4961*38fd1498Szrj 
4962*38fd1498Szrj #endif /* INSN_SCHEDULING */
4963