/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992-2019 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "insn-config.h"
#include "regs.h"
#include "memmodel.h"
#include "ira.h"
#include "ira-int.h"
#include "insn-attr.h"
#include "cfgbuild.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"

#ifdef INSN_SCHEDULING

/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
vec<haifa_deps_insn_data_def> h_d_i_d = vNULL;

/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  if (ds & DEP_CONTROL)
    return REG_DEP_CONTROL;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}

/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  switch (dk)
    {
    case REG_DEP_TRUE:
      return DEP_TRUE;

    case REG_DEP_OUTPUT:
      return DEP_OUTPUT;

    case REG_DEP_CONTROL:
      return DEP_CONTROL;

    default:
      gcc_assert (dk == REG_DEP_ANTI);
      return DEP_ANTI;
    }
}
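
/* A note on the two helpers above (illustrative only, not used by the
   scheduler): the mapping is lossy when several type bits are set in a
   status word, since ds_to_dk reports only the strongest type.  E.g.:

       ds_t ds = DEP_OUTPUT | DEP_ANTI;
       ds_to_dk (ds)              => REG_DEP_OUTPUT
       dk_to_ds (ds_to_dk (ds))   => DEP_OUTPUT

   so converting a status to a reg_note and back need not preserve it.  */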

/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
void
init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
  DEP_COST (dep) = UNKNOWN_DEP_COST;
  DEP_NONREG (dep) = 0;
  DEP_MULTIPLE (dep) = 0;
  DEP_REPLACE (dep) = NULL;
}

/* Init DEP with the arguments.
   While most of the scheduler (including targets) only needs the major type
   of the dependency, it is convenient to hide full dep_status from them.  */
void
init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);
  else
    ds = 0;

  init_dep_1 (dep, pro, con, kind, ds);
}

/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}

static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE	\
		      |DUMP_DEP_STATUS)

/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
	{
	case REG_DEP_TRUE:
	  t = 't';
	  break;

	case REG_DEP_OUTPUT:
	  t = 'o';
	  break;

	case REG_DEP_CONTROL:
	  t = 'c';
	  break;

	case REG_DEP_ANTI:
	  t = 'a';
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
	dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}

/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}

/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */

static inline bool
depl_on_debug_p (dep_link_t dep)
{
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
}

/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXTP.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
	      && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}

/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);
}

/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}
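
/* A note on the representation used above (illustrative only): instead
   of a "prev" pointer, each link stores PREV_NEXTP, the address of the
   field that points at it.  For a list L with links A and B:

     DEPS_LIST_FIRST (l) = A,  DEP_LINK_NEXT (A) = B
     DEP_LINK_PREV_NEXTP (A) = &DEPS_LIST_FIRST (l)
     DEP_LINK_PREV_NEXTP (B) = &DEP_LINK_NEXT (A)

   so detach_dep_link can unlink either of them with the same two
   assignments, with no special case for the head of the list.  */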

/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);
}

/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}

/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}

/* Pool to hold all dependency nodes (dep_node_t).  */
static object_allocator<_dep_node> *dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;

/* Create a dep_node.  */
static dep_node_t
create_dep_node (void)
{
  dep_node_t n = dn_pool->allocate ();
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  ++dn_pool_diff;

  return n;
}

/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));

  --dn_pool_diff;

  dn_pool->remove (n);
}

/* Pool to hold dependencies lists (deps_list_t).  */
static object_allocator<_deps_list> *dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;

/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}

/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = dl_pool->allocate ();

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}

/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  dl_pool->remove (l);
}

/* Return true if there are no dep_nodes or deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to the pool.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}

/* Remove all elements from L.  */
static void
clear_deps_list (deps_list_t l)
{
  do
    {
      dep_link_t link = DEPS_LIST_FIRST (l);

      if (link == NULL)
	break;

      remove_from_deps_list (link, l);
    }
  while (1);
}

/* Decide whether a dependency should be treated as a hard or a speculative
   dependency.  */
static bool
dep_spec_p (dep_t dep)
{
  if (current_sched_info->flags & DO_SPECULATION)
    {
      if (DEP_STATUS (dep) & SPECULATIVE)
	return true;
    }
  if (current_sched_info->flags & DO_PREDICATION)
    {
      if (DEP_TYPE (dep) == REG_DEP_CONTROL)
	return true;
    }
  if (DEP_REPLACE (dep) != NULL)
    return true;
  return false;
}

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static regset reg_pending_control_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;

/* Hard registers implicitly clobbered or used (or may be implicitly
   clobbered or used) by the currently analyzed insn.  For example,
   an insn may have a constraint with a single register class.  Even if
   there is currently no hard register in the insn, that particular hard
   register will appear in the insn after the reload pass because the
   constraint requires it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;

/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmap cache for true dependencies is
   allocated, then the remaining caches are allocated as well.  */
static bitmap true_dependency_cache = NULL;
static bitmap output_dependency_cache = NULL;
static bitmap anti_dependency_cache = NULL;
static bitmap control_dependency_cache = NULL;
static bitmap spec_dependency_cache = NULL;
static int cache_size;

/* True if we should mark added dependencies as non-register deps.  */
static bool mark_as_hard;

static int deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
				 enum reg_note, bool);
static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
					  rtx_insn_list **, int, enum reg_note,
					  bool);
static void delete_all_dependences (rtx_insn *);
static void chain_to_prev_insn (rtx_insn *);

static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);

static bool sched_has_condition_p (const rtx_insn *);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
							  rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

static void check_dep (dep_t, bool);


/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
	addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}


/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
	return 0;

      if (rev)
	*rev = true;
      return cond;
    }

  return 0;
}

/* Return the condition under which INSN does not execute (i.e.  the
   not-taken condition for a conditional branch), or NULL if we cannot
   find such a condition.  The caller should make a copy of the condition
   before using it.  */
rtx
sched_get_reverse_condition_uncached (const rtx_insn *insn)
{
  bool rev;
  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
  if (cond == NULL_RTX)
    return cond;
  if (!rev)
    {
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
      cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
			     XEXP (cond, 0),
			     XEXP (cond, 1));
    }
  return cond;
}

/* Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
static rtx
sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
{
  bool tmp;

  if (INSN_LUID (insn) == 0)
    return sched_get_condition_with_rev_uncached (insn, rev);

  if (INSN_CACHED_COND (insn) == const_true_rtx)
    return NULL_RTX;

  if (INSN_CACHED_COND (insn) != NULL_RTX)
    {
      if (rev)
	*rev = INSN_REVERSE_COND (insn);
      return INSN_CACHED_COND (insn);
    }

  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
  INSN_REVERSE_COND (insn) = tmp;

  if (INSN_CACHED_COND (insn) == NULL_RTX)
    {
      INSN_CACHED_COND (insn) = const_true_rtx;
      return NULL_RTX;
    }

  if (rev)
    *rev = INSN_REVERSE_COND (insn);
  return INSN_CACHED_COND (insn);
}

/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const rtx_insn *insn)
{
  return !! sched_get_condition_with_rev (insn, NULL);
}



/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
static int
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) ==
	  (rev1==rev2
	  ? reversed_comparison_code (cond2, NULL)
	  : GET_CODE (cond2))
      && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}

/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
	return true;
    }
  return false;
}


/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* The following instructions, which depend on a speculatively scheduled
       instruction, cannot themselves be speculatively scheduled.  */
    {
      if (may_trap_or_fault_p (PATTERN (insn)))
	/* If instruction might fault, it cannot be speculatively scheduled.
	   For control speculation it's obvious why and for data speculation
	   it's because the insn might get wrong input if speculation
	   wasn't successful.  */
	return false;

      if ((ds & BE_IN_DATA)
	  && sched_has_condition_p (insn))
	/* If this is a predicated instruction, then it cannot be
	   speculatively scheduled.  See PR35659.  */
	return false;
    }

  return true;
}

/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
	      deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}
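
/* A typical consumer of sd_next_list is the FOR_EACH_DEP iterator from
   sched-int.h.  A sketch of the usual pattern (illustrative only; the
   loop body and its callback are made up):

     sd_iterator_def sd_it;
     dep_t dep;

     FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
       if (DEP_TYPE (dep) == REG_DEP_TRUE)
	 note_true_dep (dep);   hypothetical callback

   SD_LIST_BACK combines SD_LIST_HARD_BACK and SD_LIST_SPEC_BACK, so
   the walk transparently crosses from one underlying deps_list to the
   next via sd_next_list.  */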

/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (list)
	size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}

/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */

bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
	return false;
    }

  return true;
}

/* Initialize data for INSN.  */
void
sd_init_insn (rtx_insn *insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */
}

/* Free data for INSN.  */
void
sd_finish_insn (rtx_insn *insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
			      sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
	if (DEP_PRO (dep) == pro)
	  {
	    found_p = true;
	    break;
	  }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
	if (DEP_CON (dep) == con)
	  {
	    found_p = true;
	    break;
	  }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
	*sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches [if available] to check whether the
   dependency is present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none was found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}
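
/* Usage sketch (illustrative only): to ask whether CON must wait for
   PRO among the still-unresolved dependencies, and to inspect the type:

     dep_t dep = sd_find_dep_between (pro, con, false);
     if (dep != NULL && DEP_TYPE (dep) == REG_DEP_TRUE)
       ...

   Note that the cache check above only filters out definite misses; a
   hit still requires the list walk to locate the dep_t itself.  */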

/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks if producer and consumer are the same insn
   and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
        /* INSN has an internal dependence, which we can't overcome.  */
        HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}

/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
	      && output_dependency_cache != NULL
	      && anti_dependency_cache != NULL
	      && control_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_ANTI;
      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_CONTROL;
      else
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
	/* DEP does not add anything to the existing dependence.  */
	return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_ANTI;
      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_CONTROL;

      if (present_dep_types == 0)
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
	{
	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
	      == present_dep_types)
	    /* DEP does not add anything to the existing dependence.  */
	    return DEP_PRESENT;
	}
      else
	{
	  /* Only true dependencies can be data speculative and
	     only anti dependencies can be control speculative.  */
	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
		      == present_dep_types);

	  /* if (DEP is SPECULATIVE) then
	     ..we should update DEP_STATUS
	     else
	     ..we should reset existing dep to non-speculative.  */
	}
    }

  return DEP_CHANGED;
}

/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
	{
	case REG_DEP_TRUE:
	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_OUTPUT:
	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_CONTROL)
	bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
	{
	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
	}
    }
}

/* The type of dependence DEP has changed from OLD_TYPE.  Update dependency
   caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear corresponding cache entry because type of the link
     may have changed.  Keep them if we use_deps_list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
	{
	case REG_DEP_OUTPUT:
	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  set_dependency_caches (dep);
}
/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
		      INSN_LUID (elem));
}

/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case a
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
	    rtx mem1 ATTRIBUTE_UNUSED,
	    rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  bool was_spec = dep_spec_p (dep);

  DEP_NONREG (dep) |= DEP_NONREG (new_dep);
  DEP_MULTIPLE (dep) = 1;

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	{
	  /* Either existing dep or a dep we're adding or both are
	     speculative.  */
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    new_status &= ~SPECULATIVE;
	  else
	    {
	      /* Both are speculative.  Merge probabilities.  */
	      if (mem1 != NULL)
		{
		  dw_t dw;

		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
		}

	      new_status = ds_merge (dep_status, ds);
	    }
	}

      ds = new_status;

      if (dep_status != ds)
	{
	  DEP_STATUS (dep) = ds;
	  res = DEP_CHANGED;
	}
    }

  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  */
    change_spec_dep_to_hard (sd_it);

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}

/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

  if (flag_checking)
    check_dep (new_dep, mem1 != NULL);

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
	{
	case DEP_PRESENT:
	  dep_t present_dep;
	  sd_iterator_def sd_it;

	  present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						      DEP_CON (new_dep),
						      resolved_p, &sd_it);
	  DEP_MULTIPLE (present_dep) = 1;
	  return DEP_PRESENT;

	case DEP_CHANGED:
	  maybe_present_p = true;
	  present_p = true;
	  break;

	case DEP_CREATED:
	  maybe_present_p = false;
	  present_p = false;
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
	/* We didn't find a dep; it shouldn't be present in the cache.  */
	gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}

/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
			 deps_list_t *back_list_ptr,
			 deps_list_t *forw_list_ptr)
{
  rtx_insn *con = DEP_CON (dep);

  if (!resolved_p)
    {
      if (dep_spec_p (dep))
	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
	*back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}

/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION) == 0
      || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

  if (flag_checking)
    check_dep (dep, false);

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}
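
/* Callers typically build the dep on the stack, as sd_copy_back_deps
   does below.  A minimal sketch (illustrative; PRO and CON stand for
   some producer and consumer insns):

     dep_def _dep, *dep = &_dep;

     init_dep (dep, pro, con, REG_DEP_TRUE);
     sd_add_dep (dep, false);

   sd_add_dep copies *DEP into the freshly allocated dep_node, so the
   stack object may safely go out of scope afterwards.  */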

/* Add or update backward dependence between INSN and ELEM
   with given type DEP_TYPE and dep_status DS.
   This function is a convenience wrapper.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}

/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
		 INSN_RESOLVED_FORW_DEPS (pro));
}

/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state.  */
void
sd_unresolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_SPEC_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_HARD_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
		 INSN_FORW_DEPS (pro));
}

/* Make TO depend on all the FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}

/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}
1454 
1455 /* Dump size of the lists.  */
1456 #define DUMP_LISTS_SIZE (2)
1457 
1458 /* Dump dependencies of the lists.  */
1459 #define DUMP_LISTS_DEPS (4)
1460 
1461 /* Dump all information about the lists.  */
1462 #define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1463 
1464 /* Dump deps_lists of INSN specified by TYPES to DUMP.
1465    FLAGS is a bit mask specifying what information about the lists needs
1466    to be printed.
1467    If FLAGS has the very first bit set, then dump all information about
1468    the lists and propagate this bit into the callee dump functions.  */
1469 static void
dump_lists(FILE * dump,rtx insn,sd_list_types_def types,int flags)1470 dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
1471 {
1472   sd_iterator_def sd_it;
1473   dep_t dep;
1474   int all;
1475 
1476   all = (flags & 1);
1477 
1478   if (all)
1479     flags |= DUMP_LISTS_ALL;
1480 
1481   fprintf (dump, "[");
1482 
1483   if (flags & DUMP_LISTS_SIZE)
1484     fprintf (dump, "%d; ", sd_lists_size (insn, types));
1485 
1486   if (flags & DUMP_LISTS_DEPS)
1487     {
1488       FOR_EACH_DEP (insn, types, sd_it, dep)
1489 	{
1490 	  dump_dep (dump, dep, dump_dep_flags | all);
1491 	  fprintf (dump, " ");
1492 	}
1493     }
1494 }
1495 
1496 /* Dump all information about deps_lists of INSN specified by TYPES
1497    to STDERR.  */
1498 void
sd_debug_lists(rtx insn,sd_list_types_def types)1499 sd_debug_lists (rtx insn, sd_list_types_def types)
1500 {
1501   dump_lists (stderr, insn, types, 1);
1502   fprintf (stderr, "\n");
1503 }
1504 
1505 /* A wrapper around add_dependence_1, to add a dependence of CON on
1506    PRO, with type DEP_TYPE.  This function implements special handling
1507    for REG_DEP_CONTROL dependencies.  For these, we optionally promote
1508    the type to REG_DEP_ANTI if we can determine that predication is
1509    impossible; otherwise we add additional true dependencies on the
1510    INSN_COND_DEPS list of the jump (which PRO must be).  */
1511 void
add_dependence(rtx_insn * con,rtx_insn * pro,enum reg_note dep_type)1512 add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
1513 {
1514   if (dep_type == REG_DEP_CONTROL
1515       && !(current_sched_info->flags & DO_PREDICATION))
1516     dep_type = REG_DEP_ANTI;
1517 
1518   /* A REG_DEP_CONTROL dependence may be eliminated through predication,
1519      so we must also make the insn dependent on the setter of the
1520      condition.  */
1521   if (dep_type == REG_DEP_CONTROL)
1522     {
1523       rtx_insn *real_pro = pro;
1524       rtx_insn *other = real_insn_for_shadow (real_pro);
1525       rtx cond;
1526 
1527       if (other != NULL_RTX)
1528 	real_pro = other;
1529       cond = sched_get_reverse_condition_uncached (real_pro);
1530       /* Verify that the insn does not use a different value in
1531 	 the condition register than the one that was present at
1532 	 the jump.  */
1533       if (cond == NULL_RTX)
1534 	dep_type = REG_DEP_ANTI;
1535       else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
1536 	{
1537 	  HARD_REG_SET uses;
1538 	  CLEAR_HARD_REG_SET (uses);
1539 	  note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
1540 	  if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
1541 	    dep_type = REG_DEP_ANTI;
1542 	}
1543       if (dep_type == REG_DEP_CONTROL)
1544 	{
1545 	  if (sched_verbose >= 5)
1546 	    fprintf (sched_dump, "making DEP_CONTROL for %d\n",
1547 		     INSN_UID (real_pro));
1548 	  add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
1549 			       REG_DEP_TRUE, false);
1550 	}
1551     }
1552 
1553   add_dependence_1 (con, pro, dep_type);
1554 }
1555 
1556 /* A convenience wrapper to operate on an entire list.  HARD should be
1557    true if DEP_NONREG should be set on newly created dependencies.  */
1558 
1559 static void
add_dependence_list(rtx_insn * insn,rtx_insn_list * list,int uncond,enum reg_note dep_type,bool hard)1560 add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
1561 		     enum reg_note dep_type, bool hard)
1562 {
1563   mark_as_hard = hard;
1564   for (; list; list = list->next ())
1565     {
1566       if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
1567 	add_dependence (insn, list->insn (), dep_type);
1568     }
1569   mark_as_hard = false;
1570 }
1571 
1572 /* Similar, but free *LISTP at the same time, when the context
1573    is not readonly.  HARD should be true if DEP_NONREG should be set on
1574    newly created dependencies.  */
1575 
1576 static void
add_dependence_list_and_free(struct deps_desc * deps,rtx_insn * insn,rtx_insn_list ** listp,int uncond,enum reg_note dep_type,bool hard)1577 add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
1578 			      rtx_insn_list **listp,
1579                               int uncond, enum reg_note dep_type, bool hard)
1580 {
1581   add_dependence_list (insn, *listp, uncond, dep_type, hard);
1582 
1583   /* We don't want to short-circuit dependencies involving debug
1584      insns, because they may cause actual dependencies to be
1585      disregarded.  */
1586   if (deps->readonly || DEBUG_INSN_P (insn))
1587     return;
1588 
1589   free_INSN_LIST_list (listp);
1590 }
1591 
1592 /* Remove all occurrences of INSN from LIST.  Return the number of
1593    occurrences removed.  */
1594 
1595 static int
remove_from_dependence_list(rtx_insn * insn,rtx_insn_list ** listp)1596 remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
1597 {
1598   int removed = 0;
1599 
1600   while (*listp)
1601     {
1602       if ((*listp)->insn () == insn)
1603         {
1604           remove_free_INSN_LIST_node (listp);
1605           removed++;
1606           continue;
1607         }
1608 
1609       listp = (rtx_insn_list **)&XEXP (*listp, 1);
1610     }
1611 
1612   return removed;
1613 }
1614 
1615 /* Same as above, but process two lists at once.  */
1616 static int
remove_from_both_dependence_lists(rtx_insn * insn,rtx_insn_list ** listp,rtx_expr_list ** exprp)1617 remove_from_both_dependence_lists (rtx_insn *insn,
1618 				   rtx_insn_list **listp,
1619 				   rtx_expr_list **exprp)
1620 {
1621   int removed = 0;
1622 
1623   while (*listp)
1624     {
1625       if (XEXP (*listp, 0) == insn)
1626         {
1627           remove_free_INSN_LIST_node (listp);
1628           remove_free_EXPR_LIST_node (exprp);
1629           removed++;
1630           continue;
1631         }
1632 
1633       listp = (rtx_insn_list **)&XEXP (*listp, 1);
1634       exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
1635     }
1636 
1637   return removed;
1638 }
1639 
1640 /* Clear all dependencies for an insn.  */
1641 static void
delete_all_dependences(rtx_insn * insn)1642 delete_all_dependences (rtx_insn *insn)
1643 {
1644   sd_iterator_def sd_it;
1645   dep_t dep;
1646 
1647   /* The below cycle can be optimized to clear the caches and back_deps
1648      in one call but that would provoke duplication of code from
1649      delete_dep ().  */
1650 
1651   for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1652        sd_iterator_cond (&sd_it, &dep);)
1653     sd_delete_dep (sd_it);
1654 }
1655 
1656 /* All insns in a scheduling group except the first should only have
1657    dependencies on the previous insn in the group.  So we find the
1658    first instruction in the scheduling group by walking the dependence
1659    chains backwards. Then we add the dependencies for the group to
1660    the previous nonnote insn.  */
1661 
1662 static void
chain_to_prev_insn(rtx_insn * insn)1663 chain_to_prev_insn (rtx_insn *insn)
1664 {
1665   sd_iterator_def sd_it;
1666   dep_t dep;
1667   rtx_insn *prev_nonnote;
1668 
1669   FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1670     {
1671       rtx_insn *i = insn;
1672       rtx_insn *pro = DEP_PRO (dep);
1673 
1674       do
1675 	{
1676 	  i = prev_nonnote_insn (i);
1677 
1678 	  if (pro == i)
1679 	    goto next_link;
1680 	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));
1681 
1682       if (! sched_insns_conditions_mutex_p (i, pro))
1683 	add_dependence (i, pro, DEP_TYPE (dep));
1684     next_link:;
1685     }
1686 
1687   delete_all_dependences (insn);
1688 
1689   prev_nonnote = prev_nonnote_nondebug_insn (insn);
1690   if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1691       && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1692     add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1693 }
1694 
1695 /* Process an insn's memory dependencies.  There are four kinds of
1696    dependencies:
1697 
1698    (0) read dependence: read follows read
1699    (1) true dependence: read follows write
1700    (2) output dependence: write follows write
1701    (3) anti dependence: write follows read
1702 
1703    We are careful to build only dependencies which actually exist, and
1704    use transitivity to avoid building too many links.  */
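
/* As a hedged illustration, in pseudo-RTL:

     (0) read dependence:    r1 = [x]  ...  r2 = [x]
     (1) true dependence:    [x] = r1  ...  r2 = [x]
     (2) output dependence:  [x] = r1  ...  [x] = r2
     (3) anti dependence:    r1 = [x]  ...  [x] = r2

   Case (0) normally imposes no ordering by itself; see read_dependence
   for the exceptions (e.g. volatile accesses).  */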
1705 
1706 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1707    The MEM is a memory reference contained within INSN, which we are saving
1708    so that we can do memory aliasing on it.  */
1709 
1710 static void
1711 add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
1712 			 rtx_insn *insn, rtx mem)
1713 {
1714   rtx_insn_list **insn_list;
1715   rtx_insn_list *insn_node;
1716   rtx_expr_list **mem_list;
1717   rtx_expr_list *mem_node;
1718 
1719   gcc_assert (!deps->readonly);
1720   if (read_p)
1721     {
1722       insn_list = &deps->pending_read_insns;
1723       mem_list = &deps->pending_read_mems;
1724       if (!DEBUG_INSN_P (insn))
1725 	deps->pending_read_list_length++;
1726     }
1727   else
1728     {
1729       insn_list = &deps->pending_write_insns;
1730       mem_list = &deps->pending_write_mems;
1731       deps->pending_write_list_length++;
1732     }
1733 
1734   insn_node = alloc_INSN_LIST (insn, *insn_list);
1735   *insn_list = insn_node;
1736 
1737   if (sched_deps_info->use_cselib)
1738     {
1739       mem = shallow_copy_rtx (mem);
1740       XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
1741 							GET_MODE (mem), insn);
1742     }
1743   mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1744   *mem_list = mem_node;
1745 }
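
/* The insn and mem lists above are kept in lockstep: the N-th node of
   deps->pending_read_insns pairs with the N-th node of
   deps->pending_read_mems, and likewise for the write lists.  A
   consumer therefore walks both lists together; a minimal sketch:

     rtx_insn_list *pending = deps->pending_read_insns;
     rtx_expr_list *pending_mem = deps->pending_read_mems;
     while (pending)
       {
	 ... use pending->insn () and pending_mem->element () ...
	 pending = pending->next ();
	 pending_mem = pending_mem->next ();
       }
*/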
1746 
1747 /* Make a dependency between every memory reference on the pending lists
1748    and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
1749    dependencies for a read operation, similarly with FOR_WRITE.  */
1750 
1751 static void
1752 flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
1753 		     int for_write)
1754 {
1755   if (for_write)
1756     {
1757       add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1758                                     1, REG_DEP_ANTI, true);
1759       if (!deps->readonly)
1760         {
1761           free_EXPR_LIST_list (&deps->pending_read_mems);
1762           deps->pending_read_list_length = 0;
1763         }
1764     }
1765 
1766   add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1767 				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1768 				true);
1769 
1770   add_dependence_list_and_free (deps, insn,
1771                                 &deps->last_pending_memory_flush, 1,
1772                                 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1773 				true);
1774 
1775   add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
1776 				REG_DEP_ANTI, true);
1777 
1778   if (DEBUG_INSN_P (insn))
1779     {
1780       if (for_write)
1781 	free_INSN_LIST_list (&deps->pending_read_insns);
1782       free_INSN_LIST_list (&deps->pending_write_insns);
1783       free_INSN_LIST_list (&deps->last_pending_memory_flush);
1784       free_INSN_LIST_list (&deps->pending_jump_insns);
1785     }
1786 
1787   if (!deps->readonly)
1788     {
1789       free_EXPR_LIST_list (&deps->pending_write_mems);
1790       deps->pending_write_list_length = 0;
1791 
1792       deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1793       deps->pending_flush_length = 1;
1794     }
1795   mark_as_hard = false;
1796 }
1797 
1798 /* Instruction whose dependencies we are analyzing.  */
1799 static rtx_insn *cur_insn = NULL;
1800 
1801 /* Implement hooks for haifa scheduler.  */
1802 
1803 static void
1804 haifa_start_insn (rtx_insn *insn)
1805 {
1806   gcc_assert (insn && !cur_insn);
1807 
1808   cur_insn = insn;
1809 }
1810 
1811 static void
1812 haifa_finish_insn (void)
1813 {
1814   cur_insn = NULL;
1815 }
1816 
1817 void
1818 haifa_note_reg_set (int regno)
1819 {
1820   SET_REGNO_REG_SET (reg_pending_sets, regno);
1821 }
1822 
1823 void
1824 haifa_note_reg_clobber (int regno)
1825 {
1826   SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1827 }
1828 
1829 void
1830 haifa_note_reg_use (int regno)
1831 {
1832   SET_REGNO_REG_SET (reg_pending_uses, regno);
1833 }
1834 
1835 static void
1836 haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
1837 {
1838   if (!(ds & SPECULATIVE))
1839     {
1840       mem = NULL_RTX;
1841       pending_mem = NULL_RTX;
1842     }
1843   else
1844     gcc_assert (ds & BEGIN_DATA);
1845 
1846   {
1847     dep_def _dep, *dep = &_dep;
1848 
1849     init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1850                 current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
1851     DEP_NONREG (dep) = 1;
1852     maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1853   }
1854 
1855 }
1856 
1857 static void
1858 haifa_note_dep (rtx_insn *elem, ds_t ds)
1859 {
1860   dep_def _dep;
1861   dep_t dep = &_dep;
1862 
1863   init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1864   if (mark_as_hard)
1865     DEP_NONREG (dep) = 1;
1866   maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1867 }
1868 
1869 static void
1870 note_reg_use (int r)
1871 {
1872   if (sched_deps_info->note_reg_use)
1873     sched_deps_info->note_reg_use (r);
1874 }
1875 
1876 static void
1877 note_reg_set (int r)
1878 {
1879   if (sched_deps_info->note_reg_set)
1880     sched_deps_info->note_reg_set (r);
1881 }
1882 
1883 static void
1884 note_reg_clobber (int r)
1885 {
1886   if (sched_deps_info->note_reg_clobber)
1887     sched_deps_info->note_reg_clobber (r);
1888 }
1889 
1890 static void
1891 note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
1892 {
1893   if (sched_deps_info->note_mem_dep)
1894     sched_deps_info->note_mem_dep (m1, m2, e, ds);
1895 }
1896 
1897 static void
1898 note_dep (rtx_insn *e, ds_t ds)
1899 {
1900   if (sched_deps_info->note_dep)
1901     sched_deps_info->note_dep (e, ds);
1902 }
1903 
1904 /* Return the reg_note corresponding to DS.  */
1905 enum reg_note
1906 ds_to_dt (ds_t ds)
1907 {
1908   if (ds & DEP_TRUE)
1909     return REG_DEP_TRUE;
1910   else if (ds & DEP_OUTPUT)
1911     return REG_DEP_OUTPUT;
1912   else if (ds & DEP_ANTI)
1913     return REG_DEP_ANTI;
1914   else
1915     {
1916       gcc_assert (ds & DEP_CONTROL);
1917       return REG_DEP_CONTROL;
1918     }
1919 }
1920 
1921 
1922 
1923 /* Functions for computation of info needed for register pressure
1924    sensitive insn scheduling.  */
1925 
1926 
1927 /* Allocate and return reg_use_data structure for REGNO and INSN.  */
1928 static struct reg_use_data *
1929 create_insn_reg_use (int regno, rtx_insn *insn)
1930 {
1931   struct reg_use_data *use;
1932 
1933   use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1934   use->regno = regno;
1935   use->insn = insn;
1936   use->next_insn_use = INSN_REG_USE_LIST (insn);
1937   INSN_REG_USE_LIST (insn) = use;
1938   return use;
1939 }
1940 
1941 /* Allocate reg_set_data structure for REGNO and INSN.  */
1942 static void
1943 create_insn_reg_set (int regno, rtx insn)
1944 {
1945   struct reg_set_data *set;
1946 
1947   set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1948   set->regno = regno;
1949   set->insn = insn;
1950   set->next_insn_set = INSN_REG_SET_LIST (insn);
1951   INSN_REG_SET_LIST (insn) = set;
1952 }
1953 
1954 /* Set up insn register uses for INSN and dependency context DEPS.  */
1955 static void
1956 setup_insn_reg_uses (struct deps_desc *deps, rtx_insn *insn)
1957 {
1958   unsigned i;
1959   reg_set_iterator rsi;
1960   struct reg_use_data *use, *use2, *next;
1961   struct deps_reg *reg_last;
1962 
1963   EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1964     {
1965       if (i < FIRST_PSEUDO_REGISTER
1966 	  && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1967 	continue;
1968 
1969       if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1970 	  && ! REGNO_REG_SET_P (reg_pending_sets, i)
1971 	  && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1972 	/* Ignore a use that is not dying.  */
1973 	continue;
1974 
1975       use = create_insn_reg_use (i, insn);
1976       use->next_regno_use = use;
1977       reg_last = &deps->reg_last[i];
1978 
1979       /* Create the cycle list of uses.  */
1980       for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
1981 	{
1982 	  use2 = create_insn_reg_use (i, list->insn ());
1983 	  next = use->next_regno_use;
1984 	  use->next_regno_use = use2;
1985 	  use2->next_regno_use = next;
1986 	}
1987     }
1988 }
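
/* The next_regno_use links built above form a circular list, so every
   use of the register can be visited starting from any node; a minimal
   sketch, where USE is a node returned by create_insn_reg_use:

     struct reg_use_data *r = use;
     do
       {
	 ... process r->insn ...
	 r = r->next_regno_use;
       }
     while (r != use);
*/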
1989 
1990 /* Register pressure info for the currently processed insn.  */
1991 static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1992 
1993 /* Return TRUE if INSN has the use structure for REGNO.  */
1994 static bool
1995 insn_use_p (rtx insn, int regno)
1996 {
1997   struct reg_use_data *use;
1998 
1999   for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2000     if (use->regno == regno)
2001       return true;
2002   return false;
2003 }
2004 
2005 /* Update the register pressure info after birth of pseudo register REGNO
2006    in INSN.  Arguments CLOBBER_P and UNUSED_P say respectively whether
2007    the register is clobbered by or unused after the insn.  */
2008 static void
2009 mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
2010 {
2011   int incr, new_incr;
2012   enum reg_class cl;
2013 
2014   gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2015   cl = sched_regno_pressure_class[regno];
2016   if (cl != NO_REGS)
2017     {
2018       incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2019       if (clobber_p)
2020 	{
2021 	  new_incr = reg_pressure_info[cl].clobber_increase + incr;
2022 	  reg_pressure_info[cl].clobber_increase = new_incr;
2023 	}
2024       else if (unused_p)
2025 	{
2026 	  new_incr = reg_pressure_info[cl].unused_set_increase + incr;
2027 	  reg_pressure_info[cl].unused_set_increase = new_incr;
2028 	}
2029       else
2030 	{
2031 	  new_incr = reg_pressure_info[cl].set_increase + incr;
2032 	  reg_pressure_info[cl].set_increase = new_incr;
2033 	  if (! insn_use_p (insn, regno))
2034 	    reg_pressure_info[cl].change += incr;
2035 	  create_insn_reg_set (regno, insn);
2036 	}
2037       gcc_assert (new_incr < (1 << INCREASE_BITS));
2038     }
2039 }
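
/* A hedged worked example: on a hypothetical target whose pressure
   class CL has 32-bit registers, a DImode pseudo has
   ira_reg_class_max_nregs[CL][DImode] == 2, so a plain SET of it adds
   2 to reg_pressure_info[CL].set_increase and, if the insn does not
   also use the pseudo, 2 to reg_pressure_info[CL].change.  */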
2040 
2041 /* Like mark_insn_pseudo_birth except that NREGS says how many
2042    hard registers are involved in the birth.  */
2043 static void
2044 mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
2045 			    bool clobber_p, bool unused_p)
2046 {
2047   enum reg_class cl;
2048   int new_incr, last = regno + nregs;
2049 
2050   while (regno < last)
2051     {
2052       gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2053       if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2054 	{
2055 	  cl = sched_regno_pressure_class[regno];
2056 	  if (cl != NO_REGS)
2057 	    {
2058 	      if (clobber_p)
2059 		{
2060 		  new_incr = reg_pressure_info[cl].clobber_increase + 1;
2061 		  reg_pressure_info[cl].clobber_increase = new_incr;
2062 		}
2063 	      else if (unused_p)
2064 		{
2065 		  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
2066 		  reg_pressure_info[cl].unused_set_increase = new_incr;
2067 		}
2068 	      else
2069 		{
2070 		  new_incr = reg_pressure_info[cl].set_increase + 1;
2071 		  reg_pressure_info[cl].set_increase = new_incr;
2072 		  if (! insn_use_p (insn, regno))
2073 		    reg_pressure_info[cl].change += 1;
2074 		  create_insn_reg_set (regno, insn);
2075 		}
2076 	      gcc_assert (new_incr < (1 << INCREASE_BITS));
2077 	    }
2078 	}
2079       regno++;
2080     }
2081 }
2082 
2083 /* Update the register pressure info after birth of pseudo or hard
2084    register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say
2085    respectively whether the register is clobbered by or unused after
2086    the insn.  */
2087 static void
2088 mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2089 {
2090   int regno;
2091 
2092   if (GET_CODE (reg) == SUBREG)
2093     reg = SUBREG_REG (reg);
2094 
2095   if (! REG_P (reg))
2096     return;
2097 
2098   regno = REGNO (reg);
2099   if (regno < FIRST_PSEUDO_REGISTER)
2100     mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg),
2101 				clobber_p, unused_p);
2102   else
2103     mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2104 }
2105 
2106 /* Update the register pressure info after death of pseudo register
2107    REGNO.  */
2108 static void
2109 mark_pseudo_death (int regno)
2110 {
2111   int incr;
2112   enum reg_class cl;
2113 
2114   gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2115   cl = sched_regno_pressure_class[regno];
2116   if (cl != NO_REGS)
2117     {
2118       incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2119       reg_pressure_info[cl].change -= incr;
2120     }
2121 }
2122 
2123 /* Like mark_pseudo_death except that NREGS says how many hard
2124    registers are involved in the death.  */
2125 static void
2126 mark_hard_regno_death (int regno, int nregs)
2127 {
2128   enum reg_class cl;
2129   int last = regno + nregs;
2130 
2131   while (regno < last)
2132     {
2133       gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2134       if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2135 	{
2136 	  cl = sched_regno_pressure_class[regno];
2137 	  if (cl != NO_REGS)
2138 	    reg_pressure_info[cl].change -= 1;
2139 	}
2140       regno++;
2141     }
2142 }
2143 
2144 /* Update the register pressure info after death of pseudo or hard
2145    register REG.  */
2146 static void
2147 mark_reg_death (rtx reg)
2148 {
2149   int regno;
2150 
2151   if (GET_CODE (reg) == SUBREG)
2152     reg = SUBREG_REG (reg);
2153 
2154   if (! REG_P (reg))
2155     return;
2156 
2157   regno = REGNO (reg);
2158   if (regno < FIRST_PSEUDO_REGISTER)
2159     mark_hard_regno_death (regno, REG_NREGS (reg));
2160   else
2161     mark_pseudo_death (regno);
2162 }
2163 
2164 /* Process SETTER of REG.  DATA is an insn containing the setter.  */
2165 static void
2166 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2167 {
2168   if (setter != NULL_RTX && GET_CODE (setter) != SET)
2169     return;
2170   mark_insn_reg_birth
2171     ((rtx) data, reg, false,
2172      find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2173 }
2174 
2175 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
2176 static void
2177 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2178 {
2179   if (GET_CODE (setter) == CLOBBER)
2180     mark_insn_reg_birth ((rtx) data, reg, true, false);
2181 }
2182 
2183 /* Set up reg pressure info related to INSN.  */
2184 void
2185 init_insn_reg_pressure_info (rtx_insn *insn)
2186 {
2187   int i, len;
2188   enum reg_class cl;
2189   static struct reg_pressure_data *pressure_info;
2190   rtx link;
2191 
2192   gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
2193 
2194   if (! INSN_P (insn))
2195     return;
2196 
2197   for (i = 0; i < ira_pressure_classes_num; i++)
2198     {
2199       cl = ira_pressure_classes[i];
2200       reg_pressure_info[cl].clobber_increase = 0;
2201       reg_pressure_info[cl].set_increase = 0;
2202       reg_pressure_info[cl].unused_set_increase = 0;
2203       reg_pressure_info[cl].change = 0;
2204     }
2205 
2206   note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2207 
2208   note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2209 
2210   if (AUTO_INC_DEC)
2211     for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2212       if (REG_NOTE_KIND (link) == REG_INC)
2213 	mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2214 
2215   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2216     if (REG_NOTE_KIND (link) == REG_DEAD)
2217       mark_reg_death (XEXP (link, 0));
2218 
2219   len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
2220   pressure_info
2221     = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2222   if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2223     INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2224 						    * sizeof (int), 1);
2225   for (i = 0; i < ira_pressure_classes_num; i++)
2226     {
2227       cl = ira_pressure_classes[i];
2228       pressure_info[i].clobber_increase
2229 	= reg_pressure_info[cl].clobber_increase;
2230       pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2231       pressure_info[i].unused_set_increase
2232 	= reg_pressure_info[cl].unused_set_increase;
2233       pressure_info[i].change = reg_pressure_info[cl].change;
2234     }
2235 }
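
/* INSN_REG_PRESSURE (insn)[I] describes ira_pressure_classes[I], so a
   consumer could, for example, sum the net pressure change of INSN
   over all pressure classes (a sketch, not code from this file):

     int change = 0;
     for (int i = 0; i < ira_pressure_classes_num; i++)
       change += INSN_REG_PRESSURE (insn)[i].change;
*/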
2236 
2237 
2238 
2239 
2240 /* Internal variable for the sched_analyze_[12] () functions.
2241    If it is nonzero, sched_analyze_[12] is looking at the
2242    outermost (toplevel) SET.  */
2243 static bool can_start_lhs_rhs_p;
2244 
2245 /* Extend reg info for the deps context DEPS given that
2246    we have just generated a register numbered REGNO.  */
2247 static void
2248 extend_deps_reg_info (struct deps_desc *deps, int regno)
2249 {
2250   int max_regno = regno + 1;
2251 
2252   gcc_assert (!reload_completed);
2253 
2254   /* In a readonly context, it would not hurt to extend info,
2255      but it should not be needed.  */
2256   if (reload_completed && deps->readonly)
2257     {
2258       deps->max_reg = max_regno;
2259       return;
2260     }
2261 
2262   if (max_regno > deps->max_reg)
2263     {
2264       deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2265                                    max_regno);
2266       memset (&deps->reg_last[deps->max_reg],
2267               0, (max_regno - deps->max_reg)
2268               * sizeof (struct deps_reg));
2269       deps->max_reg = max_regno;
2270     }
2271 }
2272 
2273 /* Extends REG_INFO_P if needed.  */
2274 void
2275 maybe_extend_reg_info_p (void)
2276 {
2277   /* Extend REG_INFO_P, if needed.  */
2278   if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2279     {
2280       size_t new_reg_info_p_size = max_regno + 128;
2281 
2282       gcc_assert (!reload_completed && sel_sched_p ());
2283 
2284       reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2285                                                     new_reg_info_p_size,
2286                                                     reg_info_p_size,
2287                                                     sizeof (*reg_info_p));
2288       reg_info_p_size = new_reg_info_p_size;
2289     }
2290 }
2291 
2292 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2293    The type of the reference is specified by REF and can be SET,
2294    CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */
2295 
2296 static void
2297 sched_analyze_reg (struct deps_desc *deps, int regno, machine_mode mode,
2298 		   enum rtx_code ref, rtx_insn *insn)
2299 {
2300   /* We could emit new pseudos in renaming.  Extend the reg structures.  */
2301   if (!reload_completed && sel_sched_p ()
2302       && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2303     extend_deps_reg_info (deps, regno);
2304 
2305   maybe_extend_reg_info_p ();
2306 
2307   /* A hard reg in a wide mode may really be multiple registers.
2308      If so, mark all of them just like the first.  */
2309   if (regno < FIRST_PSEUDO_REGISTER)
2310     {
2311       int i = hard_regno_nregs (regno, mode);
2312       if (ref == SET)
2313 	{
2314 	  while (--i >= 0)
2315 	    note_reg_set (regno + i);
2316 	}
2317       else if (ref == USE)
2318 	{
2319 	  while (--i >= 0)
2320 	    note_reg_use (regno + i);
2321 	}
2322       else if (ref == CLOBBER_HIGH)
2323 	{
2324 	  gcc_assert (i == 1);
2325 	  /* We don't know the current state of the register, so we have to
2326 	     treat the clobber high as a full clobber.  */
2327 	  note_reg_clobber (regno);
2328 	}
2329       else
2330 	{
2331 	  while (--i >= 0)
2332 	    note_reg_clobber (regno + i);
2333 	}
2334     }
2335 
2336   /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2337      it does not reload.  Ignore these as they have served their
2338      purpose already.  */
2339   else if (regno >= deps->max_reg)
2340     {
2341       enum rtx_code code = GET_CODE (PATTERN (insn));
2342       gcc_assert (code == USE || code == CLOBBER);
2343     }
2344 
2345   else
2346     {
2347       if (ref == SET)
2348 	note_reg_set (regno);
2349       else if (ref == USE)
2350 	note_reg_use (regno);
2351       else
2352 	/* For CLOBBER_HIGH, we don't know the current state of the register,
2353 	   so we have to treat it as a full clobber.  */
2354 	note_reg_clobber (regno);
2355 
2356       /* Pseudos that are REG_EQUIV to something may be replaced
2357 	 by that during reloading.  We need only add dependencies for
2358 	the address in the REG_EQUIV note.  */
2359       if (!reload_completed && get_reg_known_equiv_p (regno))
2360 	{
2361 	  rtx t = get_reg_known_value (regno);
2362 	  if (MEM_P (t))
2363 	    sched_analyze_2 (deps, XEXP (t, 0), insn);
2364 	}
2365 
2366       /* Don't let it cross a call after scheduling if it doesn't
2367 	 already cross one.  */
2368       if (REG_N_CALLS_CROSSED (regno) == 0)
2369 	{
2370 	  if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2371 	    deps->sched_before_next_call
2372 	      = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2373 	  else
2374 	    add_dependence_list (insn, deps->last_function_call, 1,
2375 				 REG_DEP_ANTI, false);
2376 	}
2377     }
2378 }
2379 
2380 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2381    rtx, X, creating all dependencies generated by the write to the
2382    destination of X, and reads of everything mentioned.  */
2383 
2384 static void
2385 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2386 {
2387   rtx dest = XEXP (x, 0);
2388   enum rtx_code code = GET_CODE (x);
2389   bool cslr_p = can_start_lhs_rhs_p;
2390 
2391   can_start_lhs_rhs_p = false;
2392 
2393   gcc_assert (dest);
2394   if (dest == 0)
2395     return;
2396 
2397   if (cslr_p && sched_deps_info->start_lhs)
2398     sched_deps_info->start_lhs (dest);
2399 
2400   if (GET_CODE (dest) == PARALLEL)
2401     {
2402       int i;
2403 
2404       for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2405 	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2406 	  sched_analyze_1 (deps,
2407 			   gen_rtx_CLOBBER (VOIDmode,
2408 					    XEXP (XVECEXP (dest, 0, i), 0)),
2409 			   insn);
2410 
2411       if (cslr_p && sched_deps_info->finish_lhs)
2412 	sched_deps_info->finish_lhs ();
2413 
2414       if (code == SET)
2415 	{
2416 	  can_start_lhs_rhs_p = cslr_p;
2417 
2418 	  sched_analyze_2 (deps, SET_SRC (x), insn);
2419 
2420 	  can_start_lhs_rhs_p = false;
2421 	}
2422 
2423       return;
2424     }
2425 
2426   while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2427 	 || GET_CODE (dest) == ZERO_EXTRACT)
2428     {
2429       if (GET_CODE (dest) == STRICT_LOW_PART
2430 	 || GET_CODE (dest) == ZERO_EXTRACT
2431 	 || read_modify_subreg_p (dest))
2432         {
2433 	  /* These both read and modify the result.  We must handle
2434              them as writes to get proper dependencies for following
2435              instructions.  We must handle them as reads to get proper
2436              dependencies from this to previous instructions.
2437              Thus we need to call sched_analyze_2.  */
2438 
2439 	  sched_analyze_2 (deps, XEXP (dest, 0), insn);
2440 	}
2441       if (GET_CODE (dest) == ZERO_EXTRACT)
2442 	{
2443 	  /* The second and third arguments are values read by this insn.  */
2444 	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
2445 	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
2446 	}
2447       dest = XEXP (dest, 0);
2448     }
2449 
2450   if (REG_P (dest))
2451     {
2452       int regno = REGNO (dest);
2453       machine_mode mode = GET_MODE (dest);
2454 
2455       sched_analyze_reg (deps, regno, mode, code, insn);
2456 
2457 #ifdef STACK_REGS
2458       /* Treat all writes to a stack register as modifying the TOS.  */
2459       if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2460 	{
2461 	  /* Avoid analyzing the same register twice.  */
2462 	  if (regno != FIRST_STACK_REG)
2463 	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2464 
2465 	  add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2466 			       FIRST_STACK_REG);
2467 	}
2468 #endif
2469     }
2470   else if (MEM_P (dest))
2471     {
2472       /* Writing memory.  */
2473       rtx t = dest;
2474 
2475       if (sched_deps_info->use_cselib)
2476 	{
2477 	  machine_mode address_mode = get_address_mode (dest);
2478 
2479 	  t = shallow_copy_rtx (dest);
2480 	  cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2481 				   GET_MODE (t), insn);
2482 	  XEXP (t, 0)
2483 	    = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2484 						insn);
2485 	}
2486       t = canon_rtx (t);
2487 
2488       /* Pending lists can't get larger with a readonly context.  */
2489       if (!deps->readonly
2490           && ((deps->pending_read_list_length + deps->pending_write_list_length)
2491               >= MAX_PENDING_LIST_LENGTH))
2492 	{
2493 	  /* Flush all pending reads and writes to prevent the pending lists
2494 	     from getting any larger.  Insn scheduling runs too slowly when
2495 	     these lists get long.  When compiling GCC with itself,
2496 	     this flush occurs 8 times for sparc, and 10 times for m88k using
2497 	     the default value of 32.  */
2498 	  flush_pending_lists (deps, insn, false, true);
2499 	}
2500       else
2501 	{
2502 	  rtx_insn_list *pending;
2503 	  rtx_expr_list *pending_mem;
2504 
2505 	  pending = deps->pending_read_insns;
2506 	  pending_mem = deps->pending_read_mems;
2507 	  while (pending)
2508 	    {
2509 	      if (anti_dependence (pending_mem->element (), t)
2510 		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2511 		note_mem_dep (t, pending_mem->element (), pending->insn (),
2512 			      DEP_ANTI);
2513 
2514 	      pending = pending->next ();
2515 	      pending_mem = pending_mem->next ();
2516 	    }
2517 
2518 	  pending = deps->pending_write_insns;
2519 	  pending_mem = deps->pending_write_mems;
2520 	  while (pending)
2521 	    {
2522 	      if (output_dependence (pending_mem->element (), t)
2523 		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2524 		note_mem_dep (t, pending_mem->element (),
2525 			      pending->insn (),
2526 			      DEP_OUTPUT);
2527 
2528 	      pending = pending->next ();
2529 	      pending_mem = pending_mem->next ();
2530 	    }
2531 
2532 	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2533 			       REG_DEP_ANTI, true);
2534 	  add_dependence_list (insn, deps->pending_jump_insns, 1,
2535 			       REG_DEP_CONTROL, true);
2536 
2537           if (!deps->readonly)
2538             add_insn_mem_dependence (deps, false, insn, dest);
2539 	}
2540       sched_analyze_2 (deps, XEXP (dest, 0), insn);
2541     }
2542 
2543   if (cslr_p && sched_deps_info->finish_lhs)
2544     sched_deps_info->finish_lhs ();
2545 
2546   /* Analyze reads.  */
2547   if (GET_CODE (x) == SET)
2548     {
2549       can_start_lhs_rhs_p = cslr_p;
2550 
2551       sched_analyze_2 (deps, SET_SRC (x), insn);
2552 
2553       can_start_lhs_rhs_p = false;
2554     }
2555 }
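
/* For example (a hedged sketch), for X = (set (mem (reg A)) (reg B))
   the code above creates anti-dependencies against pending reads that
   may alias the store and output dependencies against pending writes,
   queues the MEM on the pending write list, and finally analyzes
   (reg A) and (reg B) as reads via sched_analyze_2.  */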
2556 
2557 /* Analyze the uses of memory and registers in rtx X in INSN.  */
2558 static void
2559 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2560 {
2561   int i;
2562   int j;
2563   enum rtx_code code;
2564   const char *fmt;
2565   bool cslr_p = can_start_lhs_rhs_p;
2566 
2567   can_start_lhs_rhs_p = false;
2568 
2569   gcc_assert (x);
2570   if (x == 0)
2571     return;
2572 
2573   if (cslr_p && sched_deps_info->start_rhs)
2574     sched_deps_info->start_rhs (x);
2575 
2576   code = GET_CODE (x);
2577 
2578   switch (code)
2579     {
2580     CASE_CONST_ANY:
2581     case SYMBOL_REF:
2582     case CONST:
2583     case LABEL_REF:
2584       /* Ignore constants.  */
2585       if (cslr_p && sched_deps_info->finish_rhs)
2586 	sched_deps_info->finish_rhs ();
2587 
2588       return;
2589 
2590     case CC0:
2591       if (!HAVE_cc0)
2592 	gcc_unreachable ();
2593 
2594       /* User of CC0 depends on immediately preceding insn.  */
2595       SCHED_GROUP_P (insn) = 1;
2596       /* Don't move the CC0 setter to another block (it can set up the
2597 	 same flag for previous CC0 users, which is safe).  */
2598       CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2599 
2600       if (cslr_p && sched_deps_info->finish_rhs)
2601 	sched_deps_info->finish_rhs ();
2602 
2603       return;
2604 
2605     case REG:
2606       {
2607 	int regno = REGNO (x);
2608 	machine_mode mode = GET_MODE (x);
2609 
2610 	sched_analyze_reg (deps, regno, mode, USE, insn);
2611 
2612 #ifdef STACK_REGS
2613       /* Treat all reads of a stack register as modifying the TOS.  */
2614       if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2615 	{
2616 	  /* Avoid analyzing the same register twice.  */
2617 	  if (regno != FIRST_STACK_REG)
2618 	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2619 	  sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2620 	}
2621 #endif
2622 
2623 	if (cslr_p && sched_deps_info->finish_rhs)
2624 	  sched_deps_info->finish_rhs ();
2625 
2626 	return;
2627       }
2628 
2629     case MEM:
2630       {
2631 	/* Reading memory.  */
2632 	rtx_insn_list *u;
2633 	rtx_insn_list *pending;
2634 	rtx_expr_list *pending_mem;
2635 	rtx t = x;
2636 
2637 	if (sched_deps_info->use_cselib)
2638 	  {
2639 	    machine_mode address_mode = get_address_mode (t);
2640 
2641 	    t = shallow_copy_rtx (t);
2642 	    cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2643 				     GET_MODE (t), insn);
2644 	    XEXP (t, 0)
2645 	      = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2646 						  insn);
2647 	  }
2648 
2649 	if (!DEBUG_INSN_P (insn))
2650 	  {
2651 	    t = canon_rtx (t);
2652 	    pending = deps->pending_read_insns;
2653 	    pending_mem = deps->pending_read_mems;
2654 	    while (pending)
2655 	      {
2656 		if (read_dependence (pending_mem->element (), t)
2657 		    && ! sched_insns_conditions_mutex_p (insn,
2658 							 pending->insn ()))
2659 		  note_mem_dep (t, pending_mem->element (),
2660 				pending->insn (),
2661 				DEP_ANTI);
2662 
2663 		pending = pending->next ();
2664 		pending_mem = pending_mem->next ();
2665 	      }
2666 
2667 	    pending = deps->pending_write_insns;
2668 	    pending_mem = deps->pending_write_mems;
2669 	    while (pending)
2670 	      {
2671 		if (true_dependence (pending_mem->element (), VOIDmode, t)
2672 		    && ! sched_insns_conditions_mutex_p (insn,
2673 							 pending->insn ()))
2674 		  note_mem_dep (t, pending_mem->element (),
2675 				pending->insn (),
2676 				sched_deps_info->generate_spec_deps
2677 				? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2678 
2679 		pending = pending->next ();
2680 		pending_mem = pending_mem->next ();
2681 	      }
2682 
2683 	    for (u = deps->last_pending_memory_flush; u; u = u->next ())
2684 	      add_dependence (insn, u->insn (), REG_DEP_ANTI);
2685 
2686 	    for (u = deps->pending_jump_insns; u; u = u->next ())
2687 	      if (deps_may_trap_p (x))
2688 		{
2689 		  if ((sched_deps_info->generate_spec_deps)
2690 		      && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2691 		    {
2692 		      ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2693 					      MAX_DEP_WEAK);
2694 
2695 		      note_dep (u->insn (), ds);
2696 		    }
2697 		  else
2698 		    add_dependence (insn, u->insn (), REG_DEP_CONTROL);
2699 		}
2700 	  }
2701 
2702 	/* Always add these dependencies to pending_reads, since
2703 	   this insn may be followed by a write.  */
2704 	if (!deps->readonly)
2705 	  {
2706 	    if ((deps->pending_read_list_length
2707 		 + deps->pending_write_list_length)
2708 		>= MAX_PENDING_LIST_LENGTH
2709 		&& !DEBUG_INSN_P (insn))
2710 	      flush_pending_lists (deps, insn, true, true);
2711 	    add_insn_mem_dependence (deps, true, insn, x);
2712 	  }
2713 
2714 	sched_analyze_2 (deps, XEXP (x, 0), insn);
2715 
2716 	if (cslr_p && sched_deps_info->finish_rhs)
2717 	  sched_deps_info->finish_rhs ();
2718 
2719 	return;
2720       }
2721 
2722     /* Force pending stores to memory in case a trap handler needs them.
2723        Also force pending loads from memory; loads and stores can segfault
2724        and the signal handler won't be triggered if the trap insn was moved
2725        above a load or store insn.  */
2726     case TRAP_IF:
2727       flush_pending_lists (deps, insn, true, true);
2728       break;
2729 
2730     case PREFETCH:
2731       if (PREFETCH_SCHEDULE_BARRIER_P (x))
2732 	reg_pending_barrier = TRUE_BARRIER;
2733       /* A prefetch insn contains addresses only.  So if the prefetch
2734 	 address has no registers, there will be no dependencies on
2735 	 the prefetch insn.  This is wrong from a correctness point of
2736 	 view, as such a prefetch can be moved below a jump insn, which
2737 	 usually generates a MOVE_BARRIER that prevents moving insns
2738 	 containing registers or memories through the barrier.  It is
2739 	 also wrong from a performance point of view, as a prefetch
2740 	 without dependencies will tend to be issued later instead of
2741 	 earlier.  It is hard to generate accurate dependencies for
2742 	 prefetch insns, as a prefetch has only the start address, but
2743 	 it is better to have something than nothing.  */
2745       if (!deps->readonly)
2746 	{
2747 	  rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2748 	  if (sched_deps_info->use_cselib)
2749 	    cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2750 	  add_insn_mem_dependence (deps, true, insn, x);
2751 	}
2752       break;
2753 
2754     case UNSPEC_VOLATILE:
2755       flush_pending_lists (deps, insn, true, true);
2756       /* FALLTHRU */
2757 
2758     case ASM_OPERANDS:
2759     case ASM_INPUT:
2760       {
2761 	/* Traditional and volatile asm instructions must be considered to use
2762 	   and clobber all hard registers, all pseudo-registers and all of
2763 	   memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.
2764 
2765 	   Consider for instance a volatile asm that changes the fpu rounding
2766 	   mode.  An insn should not be moved across this even if it only uses
2767 	   pseudo-regs because it might give an incorrectly rounded result.  */
2768 	if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2769 	    && !DEBUG_INSN_P (insn))
2770 	  reg_pending_barrier = TRUE_BARRIER;
2771 
2772 	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
2773 	   We cannot just fall through here since then we would be confused
2774 	   by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2775 	   a traditional asm unlike its normal usage.  */
2776 
2777 	if (code == ASM_OPERANDS)
2778 	  {
2779 	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2780 	      sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2781 
2782 	    if (cslr_p && sched_deps_info->finish_rhs)
2783 	      sched_deps_info->finish_rhs ();
2784 
2785 	    return;
2786 	  }
2787 	break;
2788       }
2789 
2790     case PRE_DEC:
2791     case POST_DEC:
2792     case PRE_INC:
2793     case POST_INC:
2794       /* These both read and modify the result.  We must handle them as writes
2795          to get proper dependencies for following instructions.  We must handle
2796          them as reads to get proper dependencies from this to previous
2797          instructions.  Thus we need to pass them to both sched_analyze_1
2798          and sched_analyze_2.  We must call sched_analyze_2 first in order
2799          to get the proper antecedent for the read.  */
2800       sched_analyze_2 (deps, XEXP (x, 0), insn);
2801       sched_analyze_1 (deps, x, insn);
2802 
2803       if (cslr_p && sched_deps_info->finish_rhs)
2804 	sched_deps_info->finish_rhs ();
2805 
2806       return;
2807 
2808     case POST_MODIFY:
2809     case PRE_MODIFY:
2810       /* op0 = op0 + op1 */
2811       sched_analyze_2 (deps, XEXP (x, 0), insn);
2812       sched_analyze_2 (deps, XEXP (x, 1), insn);
2813       sched_analyze_1 (deps, x, insn);
2814 
2815       if (cslr_p && sched_deps_info->finish_rhs)
2816 	sched_deps_info->finish_rhs ();
2817 
2818       return;
2819 
2820     default:
2821       break;
2822     }
2823 
2824   /* Other cases: walk the insn.  */
2825   fmt = GET_RTX_FORMAT (code);
2826   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2827     {
2828       if (fmt[i] == 'e')
2829 	sched_analyze_2 (deps, XEXP (x, i), insn);
2830       else if (fmt[i] == 'E')
2831 	for (j = 0; j < XVECLEN (x, i); j++)
2832 	  sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2833     }
2834 
2835   if (cslr_p && sched_deps_info->finish_rhs)
2836     sched_deps_info->finish_rhs ();
2837 }
2838 
2839 /* Try to group two fusible insns together to prevent scheduler
2840    from scheduling them apart.  */
2841 
2842 static void
2843 sched_macro_fuse_insns (rtx_insn *insn)
2844 {
2845   rtx_insn *prev;
2846   /* No target hook would return true for a debug insn as either
2847      hook operand, and calling sched_macro_fuse_insns on each insn in
2848      a very large sequence of debug-only insns would have quadratic
2849      compile-time complexity.  */
2850   if (DEBUG_INSN_P (insn))
2851     return;
2852   prev = prev_nonnote_nondebug_insn (insn);
2853   if (!prev)
2854     return;
2855 
2856   if (any_condjump_p (insn))
2857     {
2858       unsigned int condreg1, condreg2;
2859       rtx cc_reg_1;
2860       if (targetm.fixed_condition_code_regs (&condreg1, &condreg2))
2861 	{
2862 	  cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
2863 	  if (reg_referenced_p (cc_reg_1, PATTERN (insn))
2864 	      && modified_in_p (cc_reg_1, prev))
2865 	    {
2866 	      if (targetm.sched.macro_fusion_pair_p (prev, insn))
2867 		SCHED_GROUP_P (insn) = 1;
2868 	      return;
2869 	    }
2870 	}
2871     }
2872 
2873   if (single_set (insn) && single_set (prev))
2874     {
2875       if (targetm.sched.macro_fusion_pair_p (prev, insn))
2876 	SCHED_GROUP_P (insn) = 1;
2877     }
2878 }
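
/* As a hedged illustration: on a target with fixed condition-code
   registers, a flags-setting compare immediately followed by a
   conditional jump that reads the flags, e.g.

     (set (reg:CC flags) (compare (reg a) (reg b)))
     (jump_insn ... (if_then_else (ne (reg:CC flags) (const_int 0)) ...))

   is grouped by setting SCHED_GROUP_P on the jump, provided the
   target's macro_fusion_pair_p hook approves the pair.  */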
2879 
2880 /* Get the implicit reg pending clobbers for INSN and save them in TEMP.  */
2881 void
2882 get_implicit_reg_pending_clobbers (HARD_REG_SET *temp, rtx_insn *insn)
2883 {
2884   extract_insn (insn);
2885   preprocess_constraints (insn);
2886   alternative_mask preferred = get_preferred_alternatives (insn);
2887   ira_implicitly_set_insn_hard_regs (temp, preferred);
2888   AND_COMPL_HARD_REG_SET (*temp, ira_no_alloc_regs);
2889 }
2890 
2891 /* Analyze an INSN with pattern X to find all dependencies.  */
2892 static void
2893 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
2894 {
2895   RTX_CODE code = GET_CODE (x);
2896   rtx link;
2897   unsigned i;
2898   reg_set_iterator rsi;
2899 
2900   if (! reload_completed)
2901     {
2902       HARD_REG_SET temp;
2903       get_implicit_reg_pending_clobbers (&temp, insn);
2904       IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2905     }
2906 
2907   can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2908 			 && code == SET);
2909 
2910   /* Group compare and branch insns for macro-fusion.  */
2911   if (!deps->readonly
2912       && targetm.sched.macro_fusion_p
2913       && targetm.sched.macro_fusion_p ())
2914     sched_macro_fuse_insns (insn);
2915 
2916   if (may_trap_p (x))
2917     /* Avoid moving trapping instructions across function calls that might
2918        not always return.  */
2919     add_dependence_list (insn, deps->last_function_call_may_noreturn,
2920 			 1, REG_DEP_ANTI, true);
2921 
2922   /* We must avoid creating a situation in which two successors of the
2923      current block have different unwind info after scheduling.  If at any
2924      point the two paths re-join, this leads to incorrect unwind info.  */
2925   /* ??? There are certain situations involving a forced frame pointer in
2926      which, with extra effort, we could fix up the unwind info at a later
2927      CFG join.  However, it seems better to notice these cases earlier
2928      during prologue generation and avoid marking the frame pointer setup
2929      as frame-related at all.  */
2930   if (RTX_FRAME_RELATED_P (insn))
2931     {
2932       /* Make sure prologue insn is scheduled before next jump.  */
2933       deps->sched_before_next_jump
2934 	= alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2935 
2936       /* Make sure epilogue insn is scheduled after preceding jumps.  */
2937       add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2938 			   REG_DEP_ANTI, true);
2939       add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2940 			   true);
2941     }
2942 
2943   if (code == COND_EXEC)
2944     {
2945       sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2946 
2947       /* ??? Should be recording conditions so we reduce the number of
2948 	 false dependencies.  */
2949       x = COND_EXEC_CODE (x);
2950       code = GET_CODE (x);
2951     }
2952   if (code == SET || code == CLOBBER)
2953     {
2954       sched_analyze_1 (deps, x, insn);
2955 
2956       /* Bare clobber insns are used for letting life analysis, reg-stack
2957 	 and others know that a value is dead.  Depend on the last call
2958 	 instruction so that reg-stack won't get confused.  */
2959       if (code == CLOBBER)
2960 	add_dependence_list (insn, deps->last_function_call, 1,
2961 			     REG_DEP_OUTPUT, true);
2962     }
2963   else if (code == PARALLEL)
2964     {
2965       for (i = XVECLEN (x, 0); i--;)
2966 	{
2967 	  rtx sub = XVECEXP (x, 0, i);
2968 	  code = GET_CODE (sub);
2969 
2970 	  if (code == COND_EXEC)
2971 	    {
2972 	      sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2973 	      sub = COND_EXEC_CODE (sub);
2974 	      code = GET_CODE (sub);
2975 	    }
2976 	  else if (code == SET || code == CLOBBER || code == CLOBBER_HIGH)
2977 	    sched_analyze_1 (deps, sub, insn);
2978 	  else
2979 	    sched_analyze_2 (deps, sub, insn);
2980 	}
2981     }
2982   else
2983     sched_analyze_2 (deps, x, insn);
2984 
2985   /* Mark registers CLOBBERED or used by called function.  */
2986   if (CALL_P (insn))
2987     {
2988       for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2989 	{
2990 	  if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2991 	    sched_analyze_1 (deps, XEXP (link, 0), insn);
2992 	  else if (GET_CODE (XEXP (link, 0)) == CLOBBER_HIGH)
2993 	    /* We could support CLOBBER_HIGH and treat it in the same way as
2994 	      HARD_REGNO_CALL_PART_CLOBBERED, but no port needs that yet.  */
2995 	    gcc_unreachable ();
2996 	  else if (GET_CODE (XEXP (link, 0)) != SET)
2997 	    sched_analyze_2 (deps, XEXP (link, 0), insn);
2998 	}
2999       /* Don't schedule anything after a tail call; a tail call needs
3000 	 to use at least all call-saved registers.  */
3001       if (SIBLING_CALL_P (insn))
3002 	reg_pending_barrier = TRUE_BARRIER;
3003       else if (find_reg_note (insn, REG_SETJMP, NULL))
3004 	reg_pending_barrier = MOVE_BARRIER;
3005     }
3006 
3007   if (JUMP_P (insn))
3008     {
3009       rtx_insn *next = next_nonnote_nondebug_insn (insn);
3010       /* ??? For tablejumps, the barrier may appear not immediately after
3011          the jump, but after a label and a jump_table_data insn.  */
3012       if (next && LABEL_P (next) && NEXT_INSN (next)
3013 	  && JUMP_TABLE_DATA_P (NEXT_INSN (next)))
3014 	next = NEXT_INSN (NEXT_INSN (next));
3015       if (next && BARRIER_P (next))
3016 	reg_pending_barrier = MOVE_BARRIER;
3017       else
3018 	{
3019 	  rtx_insn_list *pending;
3020 	  rtx_expr_list *pending_mem;
3021 
3022           if (sched_deps_info->compute_jump_reg_dependencies)
3023             {
3024               (*sched_deps_info->compute_jump_reg_dependencies)
3025 		(insn, reg_pending_control_uses);
3026 
3027               /* Make latency of jump equal to 0 by using anti-dependence.  */
3028               EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3029                 {
3030                   struct deps_reg *reg_last = &deps->reg_last[i];
3031                   add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
3032 				       false);
3033                   add_dependence_list (insn, reg_last->implicit_sets,
3034 				       0, REG_DEP_ANTI, false);
3035                   add_dependence_list (insn, reg_last->clobbers, 0,
3036 				       REG_DEP_ANTI, false);
3037                 }
3038             }
3039 
3040 	  /* All memory writes and volatile reads must happen before the
3041 	     jump.  Non-volatile reads must happen before the jump iff
3042 	     the result is needed by the register use mask computed above.  */
3043 
3044 	  pending = deps->pending_write_insns;
3045 	  pending_mem = deps->pending_write_mems;
3046 	  while (pending)
3047 	    {
3048 	      if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3049 		add_dependence (insn, pending->insn (),
3050 				REG_DEP_OUTPUT);
3051 	      pending = pending->next ();
3052 	      pending_mem = pending_mem->next ();
3053 	    }
3054 
3055 	  pending = deps->pending_read_insns;
3056 	  pending_mem = deps->pending_read_mems;
3057 	  while (pending)
3058 	    {
3059 	      if (MEM_VOLATILE_P (pending_mem->element ())
3060 		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3061 		add_dependence (insn, pending->insn (),
3062 				REG_DEP_OUTPUT);
3063 	      pending = pending->next ();
3064 	      pending_mem = pending_mem->next ();
3065 	    }
3066 
3067 	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
3068 			       REG_DEP_ANTI, true);
3069 	  add_dependence_list (insn, deps->pending_jump_insns, 1,
3070 			       REG_DEP_ANTI, true);
3071 	}
3072     }
3073 
3074   /* If this instruction can throw an exception, then moving it changes
3075      where block boundaries fall.  This is mighty confusing elsewhere.
3076      Therefore, prevent such an instruction from being moved.  Same for
3077      non-jump instructions that define block boundaries.
3078      ??? Unclear whether this is still necessary in EBB mode.  If not,
3079      add_branch_dependences should be adjusted for RGN mode instead.  */
3080   if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
3081       || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
3082     reg_pending_barrier = MOVE_BARRIER;
3083 
3084   if (sched_pressure != SCHED_PRESSURE_NONE)
3085     {
3086       setup_insn_reg_uses (deps, insn);
3087       init_insn_reg_pressure_info (insn);
3088     }
3089 
3090   /* Add register dependencies for insn.  */
3091   if (DEBUG_INSN_P (insn))
3092     {
3093       rtx_insn *prev = deps->last_debug_insn;
3094       rtx_insn_list *u;
3095 
3096       if (!deps->readonly)
3097 	deps->last_debug_insn = insn;
3098 
3099       if (prev)
3100 	add_dependence (insn, prev, REG_DEP_ANTI);
3101 
3102       add_dependence_list (insn, deps->last_function_call, 1,
3103 			   REG_DEP_ANTI, false);
3104 
3105       if (!sel_sched_p ())
3106 	for (u = deps->last_pending_memory_flush; u; u = u->next ())
3107 	  add_dependence (insn, u->insn (), REG_DEP_ANTI);
3108 
3109       EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3110 	{
3111 	  struct deps_reg *reg_last = &deps->reg_last[i];
3112 	  add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
3113 	  /* There's no point in making REG_DEP_CONTROL dependencies for
3114 	     debug insns.  */
3115 	  add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3116 			       false);
3117 
3118 	  if (!deps->readonly)
3119 	    reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3120 	}
3121       CLEAR_REG_SET (reg_pending_uses);
3122 
3123       /* Quite often, a debug insn will refer to stuff in the
3124 	 previous instruction, but the reason we want this
3125 	 dependency here is to make sure the scheduler doesn't
3126 	 gratuitously move a debug insn ahead.  This could dirty
3127 	 DF flags and cause additional analysis that wouldn't have
3128 	 occurred in compilation without debug insns, and such
3129 	 additional analysis can modify the generated code.  */
3130       prev = PREV_INSN (insn);
3131 
3132       if (prev && NONDEBUG_INSN_P (prev))
3133 	add_dependence (insn, prev, REG_DEP_ANTI);
3134     }
3135   else
3136     {
3137       regset_head set_or_clobbered;
3138 
3139       EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3140 	{
3141 	  struct deps_reg *reg_last = &deps->reg_last[i];
3142 	  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3143 	  add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3144 			       false);
3145 	  add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3146 			       false);
3147 
3148 	  if (!deps->readonly)
3149 	    {
3150 	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3151 	      reg_last->uses_length++;
3152 	    }
3153 	}
3154 
3155       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3156 	if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3157 	  {
3158 	    struct deps_reg *reg_last = &deps->reg_last[i];
3159 	    add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3160 	    add_dependence_list (insn, reg_last->implicit_sets, 0,
3161 				 REG_DEP_ANTI, false);
3162 	    add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3163 				 false);
3164 
3165 	    if (!deps->readonly)
3166 	      {
3167 		reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3168 		reg_last->uses_length++;
3169 	      }
3170 	  }
3171 
3172       if (targetm.sched.exposed_pipeline)
3173 	{
3174 	  INIT_REG_SET (&set_or_clobbered);
3175 	  bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3176 		      reg_pending_sets);
3177 	  EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3178 	    {
3179 	      struct deps_reg *reg_last = &deps->reg_last[i];
3180 	      rtx list;
3181 	      for (list = reg_last->uses; list; list = XEXP (list, 1))
3182 		{
3183 		  rtx other = XEXP (list, 0);
3184 		  if (INSN_CACHED_COND (other) != const_true_rtx
3185 		      && refers_to_regno_p (i, INSN_CACHED_COND (other)))
3186 		    INSN_CACHED_COND (other) = const_true_rtx;
3187 		}
3188 	    }
3189 	}
3190 
3191       /* If the current insn is conditional, we can't free any
3192 	 of the lists.  */
3193       if (sched_has_condition_p (insn))
3194 	{
3195 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3196 	    {
3197 	      struct deps_reg *reg_last = &deps->reg_last[i];
3198 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3199 				   false);
3200 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
3201 				   REG_DEP_ANTI, false);
3202 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3203 				   false);
3204 	      add_dependence_list (insn, reg_last->control_uses, 0,
3205 				   REG_DEP_CONTROL, false);
3206 
3207 	      if (!deps->readonly)
3208 		{
3209 		  reg_last->clobbers
3210 		    = alloc_INSN_LIST (insn, reg_last->clobbers);
3211 		  reg_last->clobbers_length++;
3212 		}
3213 	    }
3214 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3215 	    {
3216 	      struct deps_reg *reg_last = &deps->reg_last[i];
3217 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3218 				   false);
3219 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
3220 				   REG_DEP_ANTI, false);
3221 	      add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
				   false);
	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
				   false);
	      add_dependence_list (insn, reg_last->control_uses, 0,
				   REG_DEP_CONTROL, false);

	      if (!deps->readonly)
		reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
	    }
	}
      else
	{
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
		  || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
		{
		  add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
						REG_DEP_OUTPUT, false);
		  add_dependence_list_and_free (deps, insn,
						&reg_last->implicit_sets, 0,
						REG_DEP_ANTI, false);
		  add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
						REG_DEP_ANTI, false);
		  add_dependence_list_and_free (deps, insn,
						&reg_last->control_uses, 0,
						REG_DEP_ANTI, false);
		  add_dependence_list_and_free (deps, insn,
						&reg_last->clobbers, 0,
						REG_DEP_OUTPUT, false);

		  if (!deps->readonly)
		    {
		      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
		      reg_last->clobbers_length = 0;
		      reg_last->uses_length = 0;
		    }
		}
	      else
		{
		  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
				       false);
		  add_dependence_list (insn, reg_last->implicit_sets, 0,
				       REG_DEP_ANTI, false);
		  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
				       false);
		  add_dependence_list (insn, reg_last->control_uses, 0,
				       REG_DEP_CONTROL, false);
		}

	      if (!deps->readonly)
		{
		  reg_last->clobbers_length++;
		  reg_last->clobbers
		    = alloc_INSN_LIST (insn, reg_last->clobbers);
		}
	    }
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];

	      add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
					    REG_DEP_OUTPUT, false);
	      add_dependence_list_and_free (deps, insn,
					    &reg_last->implicit_sets,
					    0, REG_DEP_ANTI, false);
	      add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
					    REG_DEP_OUTPUT, false);
	      add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
					    REG_DEP_ANTI, false);
	      add_dependence_list (insn, reg_last->control_uses, 0,
				   REG_DEP_CONTROL, false);

	      if (!deps->readonly)
		{
		  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
		  reg_last->uses_length = 0;
		  reg_last->clobbers_length = 0;
		}
	    }
	}
      if (!deps->readonly)
	{
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      reg_last->control_uses
		= alloc_INSN_LIST (insn, reg_last->control_uses);
	    }
	}
    }

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
      {
	struct deps_reg *reg_last = &deps->reg_last[i];
	add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
	add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
	add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
	add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
			     false);

	if (!deps->readonly)
	  reg_last->implicit_sets
	    = alloc_INSN_LIST (insn, reg_last->implicit_sets);
      }

  if (!deps->readonly)
    {
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
	    || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
	  SET_REGNO_REG_SET (&deps->reg_last_in_use, i);

      /* Set up the pending barrier found.  */
      deps->last_reg_pending_barrier = reg_pending_barrier;
    }

  CLEAR_REG_SET (reg_pending_uses);
  CLEAR_REG_SET (reg_pending_clobbers);
  CLEAR_REG_SET (reg_pending_sets);
  CLEAR_REG_SET (reg_pending_control_uses);
  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);

  /* Add dependencies if a scheduling barrier was found.  */
  if (reg_pending_barrier)
    {
      /* In the case of a barrier, most of the added dependencies are not
	 real, so we use anti-dependence here.  */
      if (sched_has_condition_p (insn))
	{
	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
				   true);
	      add_dependence_list (insn, reg_last->sets, 0,
				   reg_pending_barrier == TRUE_BARRIER
				   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
	      add_dependence_list (insn, reg_last->implicit_sets, 0,
				   REG_DEP_ANTI, true);
	      add_dependence_list (insn, reg_last->clobbers, 0,
				   reg_pending_barrier == TRUE_BARRIER
				   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
	    }
	}
      else
	{
	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
					    REG_DEP_ANTI, true);
	      add_dependence_list_and_free (deps, insn,
					    &reg_last->control_uses, 0,
					    REG_DEP_CONTROL, true);
	      add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
					    reg_pending_barrier == TRUE_BARRIER
					    ? REG_DEP_TRUE : REG_DEP_ANTI,
					    true);
	      add_dependence_list_and_free (deps, insn,
					    &reg_last->implicit_sets, 0,
					    REG_DEP_ANTI, true);
	      add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
					    reg_pending_barrier == TRUE_BARRIER
					    ? REG_DEP_TRUE : REG_DEP_ANTI,
					    true);

	      if (!deps->readonly)
		{
		  reg_last->uses_length = 0;
		  reg_last->clobbers_length = 0;
		}
	    }
	}

      if (!deps->readonly)
	for (i = 0; i < (unsigned) deps->max_reg; i++)
	  {
	    struct deps_reg *reg_last = &deps->reg_last[i];
	    reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
	    SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
	  }

      /* Don't flush pending lists on speculative checks for
	 selective scheduling.  */
      if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
	flush_pending_lists (deps, insn, true, true);

      reg_pending_barrier = NOT_A_BARRIER;
    }
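
  /* An illustrative aside (a sketch, not taken from the original
     sources): the barrier kind decides the type of the links created
     above.  For a preceding set

	prev:  (set (reg r1) ...)
	insn:  <scheduling barrier>

     a TRUE_BARRIER makes INSN depend on PREV with REG_DEP_TRUE, so the
     scheduler also honors PREV's latency, while a MOVE_BARRIER merely
     pins the ordering with REG_DEP_ANTI.  */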

  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice-versa.

     We must avoid moving these insns for correctness on targets
     with small register classes, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */

  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
	{
	  if (DEBUG_INSN_P (insn))
	    /* We don't want to mark debug insns as part of the same
	       sched group.  We know they really aren't, but if we use
	       debug insns to tell that a call group is over, we'll
	       get different code if debug insns are not there and
	       instructions that follow seem like they should be part
	       of the call group.

	       Also, if we did, chain_to_prev_insn would move the
	       deps of the debug insn to the call insn, modifying
	       non-debug post-dependency counts of the debug insn
	       dependencies and otherwise messing with the scheduling
	       order.

	       Instead, let such debug insns be scheduled freely, but
	       keep the call group open in case there are insns that
	       should be part of it afterwards.  Since we grant debug
	       insns higher priority than even sched group insns, it
	       will all turn out all right.  */
	    goto debug_dont_end_call_group;
	  else
	    goto end_call_group;
	}

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (REG_P (tmp))
	dest_regno = REGNO (tmp);
      else
	goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if ((GET_CODE (tmp) == PLUS
	   || GET_CODE (tmp) == MINUS)
	  && REG_P (XEXP (tmp, 0))
	  && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
	  && dest_regno == STACK_POINTER_REGNUM)
	src_regno = STACK_POINTER_REGNUM;
      else if (REG_P (tmp))
	src_regno = REGNO (tmp);
      else
	goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
	  || dest_regno < FIRST_PSEUDO_REGISTER)
	{
	  if (!deps->readonly
	      && deps->in_post_call_group_p == post_call_initial)
	    deps->in_post_call_group_p = post_call;

	  if (!sel_sched_p () || sched_emulate_haifa_p)
	    {
	      SCHED_GROUP_P (insn) = 1;
	      CANT_MOVE (insn) = 1;
	    }
	}
      else
	{
	end_call_group:
	  if (!deps->readonly)
	    deps->in_post_call_group_p = not_post_call;
	}
    }
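
  /* As an illustrative example (hypothetical RTL, register numbers made
     up), a copy of a hard register into a pseudo right after a call,

	(set (reg:SI 123) (reg:SI 0)),

     has src_regno < FIRST_PSEUDO_REGISTER, so it keeps the post-call
     group open and, for the Haifa scheduler, is marked SCHED_GROUP_P and
     CANT_MOVE, gluing it to the call.  */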

 debug_dont_end_call_group:
  if ((current_sched_info->flags & DO_SPECULATION)
      && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
    /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
       be speculated.  */
    {
      if (sel_sched_p ())
	sel_mark_hard_insn (insn);
      else
	{
	  sd_iterator_def sd_it;
	  dep_t dep;

	  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
	       sd_iterator_cond (&sd_it, &dep);)
	    change_spec_dep_to_hard (sd_it);
	}
    }
  /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
     honor the original ordering of insns bearing that note.  */
  if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
    {
      if (deps->last_args_size)
	add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
      if (!deps->readonly)
	deps->last_args_size = insn;
    }

  /* We must not mix prologue and epilogue insns.  See PR78029.  */
  if (prologue_contains (insn))
    {
      add_dependence_list (insn, deps->last_epilogue, true, REG_DEP_ANTI, true);
      if (!deps->readonly)
	{
	  if (deps->last_logue_was_epilogue)
	    free_INSN_LIST_list (&deps->last_prologue);
	  deps->last_prologue = alloc_INSN_LIST (insn, deps->last_prologue);
	  deps->last_logue_was_epilogue = false;
	}
    }

  if (epilogue_contains (insn))
    {
      add_dependence_list (insn, deps->last_prologue, true, REG_DEP_ANTI, true);
      if (!deps->readonly)
	{
	  if (!deps->last_logue_was_epilogue)
	    free_INSN_LIST_list (&deps->last_epilogue);
	  deps->last_epilogue = alloc_INSN_LIST (insn, deps->last_epilogue);
	  deps->last_logue_was_epilogue = true;
	}
    }
}

/* Return TRUE if INSN might not always return normally (e.g. call exit,
   longjmp, loop forever, ...).  */
/* FIXME: Why can't this function just use flags_from_decl_or_type and
   test for ECF_NORETURN?  */
static bool
call_may_noreturn_p (rtx_insn *insn)
{
  rtx call;

  /* const or pure calls that aren't looping will always return.  */
  if (RTL_CONST_OR_PURE_CALL_P (insn)
      && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
    return false;

  call = get_call_rtx_from (insn);
  if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
    {
      rtx symbol = XEXP (XEXP (call, 0), 0);
      if (SYMBOL_REF_DECL (symbol)
	  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
	{
	  if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
	      == BUILT_IN_NORMAL)
	    switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
	      {
	      case BUILT_IN_BCMP:
	      case BUILT_IN_BCOPY:
	      case BUILT_IN_BZERO:
	      case BUILT_IN_INDEX:
	      case BUILT_IN_MEMCHR:
	      case BUILT_IN_MEMCMP:
	      case BUILT_IN_MEMCPY:
	      case BUILT_IN_MEMMOVE:
	      case BUILT_IN_MEMPCPY:
	      case BUILT_IN_MEMSET:
	      case BUILT_IN_RINDEX:
	      case BUILT_IN_STPCPY:
	      case BUILT_IN_STPNCPY:
	      case BUILT_IN_STRCAT:
	      case BUILT_IN_STRCHR:
	      case BUILT_IN_STRCMP:
	      case BUILT_IN_STRCPY:
	      case BUILT_IN_STRCSPN:
	      case BUILT_IN_STRLEN:
	      case BUILT_IN_STRNCAT:
	      case BUILT_IN_STRNCMP:
	      case BUILT_IN_STRNCPY:
	      case BUILT_IN_STRPBRK:
	      case BUILT_IN_STRRCHR:
	      case BUILT_IN_STRSPN:
	      case BUILT_IN_STRSTR:
		/* Assume certain string/memory builtins always return.  */
		return false;
	      default:
		break;
	      }
	}
    }

  /* For all other calls assume that they might not always return.  */
  return true;
}
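
/* For instance (an illustrative note, not from the original sources), a
   plain call to memcpy hits the BUILT_IN_MEMCPY case above and is assumed
   to always return, whereas a call through a function pointer, or to an
   unknown external function, conservatively answers true here.  */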

/* Return true if INSN should be made dependent on the previous instruction
   group, and if all INSN's dependencies should be moved to the first
   instruction of that group.  */

static bool
chain_to_prev_insn_p (rtx_insn *insn)
{
  /* INSN forms a group with the previous instruction.  */
  if (SCHED_GROUP_P (insn))
    return true;

  /* If the previous instruction clobbers a register R and this one sets
     part of R, the clobber was added specifically to help us track the
     liveness of R.  There's no point scheduling the clobber and leaving
     INSN behind, especially if we move the clobber to another block.  */
  rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
  if (prev
      && INSN_P (prev)
      && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
      && GET_CODE (PATTERN (prev)) == CLOBBER)
    {
      rtx x = XEXP (PATTERN (prev), 0);
      if (set_of (x, insn))
	return true;
    }

  return false;
}
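
/* A sketch of the clobber case above (hypothetical RTL):

	prev:  (clobber (reg:DI 0))
	insn:  (set (subreg:SI (reg:DI 0) 0) ...)

   INSN sets part of the register PREV clobbers, so set_of succeeds and
   INSN is chained to PREV instead of letting the liveness-tracking
   clobber drift away from it.  */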

/* Analyze INSN with DEPS as a context.  */
void
deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
{
  if (sched_deps_info->start_insn)
    sched_deps_info->start_insn (insn);

  /* Record the condition for this insn.  */
  if (NONDEBUG_INSN_P (insn))
    {
      rtx t;
      sched_get_condition_with_rev (insn, NULL);
      t = INSN_CACHED_COND (insn);
      INSN_COND_DEPS (insn) = NULL;
      if (reload_completed
	  && (current_sched_info->flags & DO_PREDICATION)
	  && COMPARISON_P (t)
	  && REG_P (XEXP (t, 0))
	  && CONSTANT_P (XEXP (t, 1)))
	{
	  unsigned int regno;
	  int nregs;
	  rtx_insn_list *cond_deps = NULL;
	  t = XEXP (t, 0);
	  regno = REGNO (t);
	  nregs = REG_NREGS (t);
	  while (nregs-- > 0)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
	      cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
	      cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
	      cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
	    }
	  INSN_COND_DEPS (insn) = cond_deps;
	}
    }

  if (JUMP_P (insn))
    {
      /* Make each JUMP_INSN (but not a speculative check)
	 a scheduling barrier for memory references.  */
      if (!deps->readonly
	  && !(sel_sched_p ()
	       && sel_insn_is_speculation_check (insn)))
	{
	  /* Keep the list a reasonable size.  */
	  if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
	    flush_pending_lists (deps, insn, true, true);
	  else
	    deps->pending_jump_insns
	      = alloc_INSN_LIST (insn, deps->pending_jump_insns);
	}

      /* For each insn which shouldn't cross a jump, add a dependence.  */
      add_dependence_list_and_free (deps, insn,
				    &deps->sched_before_next_jump, 1,
				    REG_DEP_ANTI, true);

      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
    {
      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (CALL_P (insn))
    {
      int i;

      CANT_MOVE (insn) = 1;

      if (find_reg_note (insn, REG_SETJMP, NULL))
	{
	  /* This is setjmp.  Assume that all registers, not just
	     hard registers, may be clobbered by this call.  */
	  reg_pending_barrier = MOVE_BARRIER;
	}
      else
	{
	  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	    /* A call may read and modify global register variables.  */
	    if (global_regs[i])
	      {
		SET_REGNO_REG_SET (reg_pending_sets, i);
		SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
	      }
	  /* Other call-clobbered hard regs may be clobbered.
	     Since we only have a choice between 'might be clobbered'
	     and 'definitely not clobbered', we must include all
	     partly call-clobbered registers here.  */
	    else if (targetm.hard_regno_call_part_clobbered (insn, i,
							     reg_raw_mode[i])
		     || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
	      SET_REGNO_REG_SET (reg_pending_clobbers, i);
	  /* We don't know exactly which fixed registers the function
	     might use, but the stack pointer is certainly among them,
	     so be conservative.  */
	    else if (fixed_regs[i])
	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
	  /* The frame pointer is normally not used by the function
	     itself, but by the debugger.  */
	  /* ??? MIPS o32 is an exception.  It uses the frame pointer
	     in the macro expansion of jal but does not represent this
	     fact in the call_insn rtl.  */
	    else if (i == FRAME_POINTER_REGNUM
		     || (i == HARD_FRAME_POINTER_REGNUM
			 && (! reload_completed || frame_pointer_needed)))
	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
	}

      /* For each insn which shouldn't cross a call, add a dependence
	 between that insn and this call insn.  */
      add_dependence_list_and_free (deps, insn,
				    &deps->sched_before_next_call, 1,
				    REG_DEP_ANTI, true);

      sched_analyze_insn (deps, PATTERN (insn), insn);

      /* If this CALL were part of a sched group, it would violate the
	 convention that sched group insns have dependencies only on the
	 previous instruction.

	 Of course one can ask: "What about the head of the sched group?"
	 And the answer is: "Basic principles (one dep per insn) are
	 always the same."  */
      gcc_assert (!SCHED_GROUP_P (insn));

      /* In the absence of interprocedural alias analysis, we must flush
	 all pending reads and writes, and start new dependencies starting
	 from here.  But only flush writes for constant calls (which may
	 be passed a pointer to something we haven't written yet).  */
      flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));

      if (!deps->readonly)
	{
	  /* Remember the last function call for limiting lifetimes.  */
	  free_INSN_LIST_list (&deps->last_function_call);
	  deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

	  if (call_may_noreturn_p (insn))
	    {
	      /* Remember the last function call that might not always return
		 normally for limiting moves of trapping insns.  */
	      free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
	      deps->last_function_call_may_noreturn
		= alloc_INSN_LIST (insn, NULL_RTX);
	    }

	  /* Before reload, begin a post-call group, so as to keep the
	     lifetimes of hard registers correct.  */
	  if (! reload_completed)
	    deps->in_post_call_group_p = post_call;
	}
    }

  if (sched_deps_info->use_cselib)
    cselib_process_insn (insn);

  if (sched_deps_info->finish_insn)
    sched_deps_info->finish_insn ();

  /* Fixup the dependencies in the sched group.  */
  if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
      && chain_to_prev_insn_p (insn)
      && !sel_sched_p ())
    chain_to_prev_insn (insn);
}

/* Initialize DEPS for the new block beginning with HEAD.  */
void
deps_start_bb (struct deps_desc *deps, rtx_insn *head)
{
  gcc_assert (!deps->readonly);

  /* Before reload, if the previous block ended in a call, show that
     we are inside a post-call group, so as to keep the lifetimes of
     hard registers correct.  */
  if (! reload_completed && !LABEL_P (head))
    {
      rtx_insn *insn = prev_nonnote_nondebug_insn (head);

      if (insn && CALL_P (insn))
	deps->in_post_call_group_p = post_call_initial;
    }
}

/* Analyze every insn between HEAD and TAIL inclusive, creating backward
   dependencies for each insn.  */
void
sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn;

  if (sched_deps_info->use_cselib)
    cselib_init (CSELIB_RECORD_MEMORY);

  deps_start_bb (deps, head);

  for (insn = head;; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  /* Initialize the insn's deps_lists.  */
	  sd_init_insn (insn);
	  /* Clean up SCHED_GROUP_P, which may have been set by the last
	     scheduler pass.  */
	  if (SCHED_GROUP_P (insn))
	    SCHED_GROUP_P (insn) = 0;
	}

      deps_analyze_insn (deps, insn);

      if (insn == tail)
	{
	  if (sched_deps_info->use_cselib)
	    cselib_finish ();
	  return;
	}
    }
  gcc_unreachable ();
}

/* Helper for sched_free_deps ().
   Delete INSN's (RESOLVED_P) backward dependencies.  */
static void
delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
{
  sd_iterator_def sd_it;
  dep_t dep;
  sd_list_types_def types;

  if (resolved_p)
    types = SD_LIST_RES_BACK;
  else
    types = SD_LIST_BACK;

  for (sd_it = sd_iterator_start (insn, types);
       sd_iterator_cond (&sd_it, &dep);)
    {
      dep_link_t link = *sd_it.linkp;
      dep_node_t node = DEP_LINK_NODE (link);
      deps_list_t back_list;
      deps_list_t forw_list;

      get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
      remove_from_deps_list (link, back_list);
      delete_dep_node (node);
    }
}

/* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
   deps_lists.  */
void
sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
{
  rtx_insn *insn;
  rtx_insn *next_tail = NEXT_INSN (tail);

  /* We make two passes since some insns may be scheduled before their
     dependencies are resolved.  */
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && INSN_LUID (insn) > 0)
      {
	/* Clear forward deps and leave the dep_nodes to the
	   corresponding back_deps list.  */
	if (resolved_p)
	  clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
	else
	  clear_deps_list (INSN_FORW_DEPS (insn));
      }
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && INSN_LUID (insn) > 0)
      {
	/* Clear resolved back deps together with their dep_nodes.  */
	delete_dep_nodes_in_back_deps (insn, resolved_p);

	sd_finish_insn (insn);
      }
}

/* Initialize variables for region data dependence analysis.
   When LAZY_REG_LAST is true, do not allocate reg_last array
   of struct deps_desc immediately.  */

void
init_deps (struct deps_desc *deps, bool lazy_reg_last)
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  if (lazy_reg_last)
    deps->reg_last = NULL;
  else
    deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
  INIT_REG_SET (&deps->reg_last_in_use);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_jump_insns = 0;
  deps->pending_read_list_length = 0;
  deps->pending_write_list_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->last_function_call_may_noreturn = 0;
  deps->sched_before_next_call = 0;
  deps->sched_before_next_jump = 0;
  deps->in_post_call_group_p = not_post_call;
  deps->last_debug_insn = 0;
  deps->last_args_size = 0;
  deps->last_prologue = 0;
  deps->last_epilogue = 0;
  deps->last_logue_was_epilogue = false;
  deps->last_reg_pending_barrier = NOT_A_BARRIER;
  deps->readonly = 0;
}

/* Initialize only the reg_last field of DEPS, which was not allocated
   before because we initialized DEPS lazily.  */
void
init_deps_reg_last (struct deps_desc *deps)
{
  gcc_assert (deps && deps->max_reg > 0);
  gcc_assert (deps->reg_last == NULL);

  deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
}


/* Free insn lists found in DEPS.  */

void
free_deps (struct deps_desc *deps)
{
  unsigned i;
  reg_set_iterator rsi;

  /* We set max_reg to 0 when this context was already freed.  */
  if (deps->max_reg == 0)
    {
      gcc_assert (deps->reg_last == NULL);
      return;
    }
  deps->max_reg = 0;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a testcase with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
	free_INSN_LIST_list (&reg_last->uses);
      if (reg_last->sets)
	free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->implicit_sets)
	free_INSN_LIST_list (&reg_last->implicit_sets);
      if (reg_last->control_uses)
	free_INSN_LIST_list (&reg_last->control_uses);
      if (reg_last->clobbers)
	free_INSN_LIST_list (&reg_last->clobbers);
    }
  CLEAR_REG_SET (&deps->reg_last_in_use);

  /* As we initialize reg_last lazily, it is possible that we didn't allocate
     it at all.  */
  free (deps->reg_last);
  deps->reg_last = NULL;

  deps = NULL;
}

/* Remove INSN from the dependence context DEPS.  */
void
remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
{
  int removed;
  unsigned i;
  reg_set_iterator rsi;

  removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
					       &deps->pending_read_mems);
  if (!DEBUG_INSN_P (insn))
    deps->pending_read_list_length -= removed;
  removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
					       &deps->pending_write_mems);
  deps->pending_write_list_length -= removed;

  removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
  deps->pending_flush_length -= removed;
  removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
  deps->pending_flush_length -= removed;

  unsigned to_clear = -1U;
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      if (to_clear != -1U)
	{
	  CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);
	  to_clear = -1U;
	}
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
	remove_from_dependence_list (insn, &reg_last->uses);
      if (reg_last->sets)
	remove_from_dependence_list (insn, &reg_last->sets);
      if (reg_last->implicit_sets)
	remove_from_dependence_list (insn, &reg_last->implicit_sets);
      if (reg_last->clobbers)
	remove_from_dependence_list (insn, &reg_last->clobbers);
      if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
	  && !reg_last->clobbers)
	to_clear = i;
    }
  if (to_clear != -1U)
    CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);

  if (CALL_P (insn))
    {
      remove_from_dependence_list (insn, &deps->last_function_call);
      remove_from_dependence_list (insn,
				   &deps->last_function_call_may_noreturn);
    }
  remove_from_dependence_list (insn, &deps->sched_before_next_call);
}

/* Init deps data vector.  */
static void
init_deps_data_vector (void)
{
  int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
  if (reserve > 0 && ! h_d_i_d.space (reserve))
    h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
}
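
/* A worked example with made-up numbers: if sched_max_luid is 1000 and
   h_d_i_d currently holds 600 elements, RESERVE is 401; when the vector
   lacks that much spare capacity, it is grown (cleared) to
   3 * 1000 / 2 == 1500 entries, i.e. roughly 50% headroom, so repeated
   extensions are amortized.  */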

/* If it is profitable to use them, initialize or extend (depending on
   GLOBAL_P) dependency data.  */
void
sched_deps_init (bool global_p)
{
  /* Average number of insns in the basic block.
     '+ 1' is used to make it nonzero.  */
  int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;

  init_deps_data_vector ();

  /* We use another caching mechanism for selective scheduling, so
     we don't use this one.  */
  if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
    {
      /* ?!? We could save some memory by computing a per-region luid mapping
	 which could reduce both the number of vectors in the cache and the
	 size of each vector.  Instead we just avoid the cache entirely unless
	 the average number of instructions in a basic block is very high.  See
	 the comment before the declaration of true_dependency_cache for
	 what we consider "very high".  */
      cache_size = 0;
      extend_dependency_caches (sched_max_luid, true);
    }

  if (global_p)
    {
      dl_pool = new object_allocator<_deps_list> ("deps_list");
				/* Allocate lists for one block at a time.  */
      dn_pool = new object_allocator<_dep_node> ("dep_node");
				/* Allocate nodes for one block at a time.  */
    }
}
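
/* A worked example with made-up numbers: for a function with
   n_basic_blocks_for_fn (cfun) == 10 and sched_max_luid == 6000, the
   average is 6000 / 10 + 1 == 601 insns per block, which exceeds the
   100 * 5 threshold above, so a global (non-selective) pass creates the
   dependency caches.  */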


/* Create or extend (depending on CREATE_P) dependency caches to
   size N.  */
void
extend_dependency_caches (int n, bool create_p)
{
  if (create_p || true_dependency_cache)
    {
      int i, luid = cache_size + n;

      true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
					  luid);
      output_dependency_cache = XRESIZEVEC (bitmap_head,
					    output_dependency_cache, luid);
      anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
					  luid);
      control_dependency_cache = XRESIZEVEC (bitmap_head,
					     control_dependency_cache, luid);

      if (current_sched_info->flags & DO_SPECULATION)
	spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
					    luid);

      for (i = cache_size; i < luid; i++)
	{
	  bitmap_initialize (&true_dependency_cache[i], 0);
	  bitmap_initialize (&output_dependency_cache[i], 0);
	  bitmap_initialize (&anti_dependency_cache[i], 0);
	  bitmap_initialize (&control_dependency_cache[i], 0);

	  if (current_sched_info->flags & DO_SPECULATION)
	    bitmap_initialize (&spec_dependency_cache[i], 0);
	}
      cache_size = luid;
    }
}

/* Finalize dependency information for the whole function.  */
void
sched_deps_finish (void)
{
  gcc_assert (deps_pools_are_empty_p ());
  delete dn_pool;
  delete dl_pool;
  dn_pool = NULL;
  dl_pool = NULL;

  h_d_i_d.release ();

  if (true_dependency_cache)
    {
      int i;

      for (i = 0; i < cache_size; i++)
	{
	  bitmap_clear (&true_dependency_cache[i]);
	  bitmap_clear (&output_dependency_cache[i]);
	  bitmap_clear (&anti_dependency_cache[i]);
	  bitmap_clear (&control_dependency_cache[i]);

	  if (sched_deps_info->generate_spec_deps)
	    bitmap_clear (&spec_dependency_cache[i]);
	}
      free (true_dependency_cache);
      true_dependency_cache = NULL;
      free (output_dependency_cache);
      output_dependency_cache = NULL;
      free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      free (control_dependency_cache);
      control_dependency_cache = NULL;

      if (sched_deps_info->generate_spec_deps)
	{
	  free (spec_dependency_cache);
	  spec_dependency_cache = NULL;
	}
    }

  /* Reset only after the loop above, which uses cache_size as its bound.  */
  cache_size = 0;
}

/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global (void)
{
  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
  reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
  reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
  reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_barrier = NOT_A_BARRIER;

  if (!sel_sched_p () || sched_emulate_haifa_p)
    {
      sched_deps_info->start_insn = haifa_start_insn;
      sched_deps_info->finish_insn = haifa_finish_insn;

      sched_deps_info->note_reg_set = haifa_note_reg_set;
      sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
      sched_deps_info->note_reg_use = haifa_note_reg_use;

      sched_deps_info->note_mem_dep = haifa_note_mem_dep;
      sched_deps_info->note_dep = haifa_note_dep;
    }
}

/* Free everything used by the dependency analysis code.  */

void
finish_deps_global (void)
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
  FREE_REG_SET (reg_pending_control_uses);
}

/* Estimate the weakness of dependence between MEM1 and MEM2.  */
dw_t
estimate_dep_weak (rtx mem1, rtx mem2)
{
  if (mem1 == mem2)
    /* MEMs are the same - don't speculate.  */
    return MIN_DEP_WEAK;

  rtx r1 = XEXP (mem1, 0);
  rtx r2 = XEXP (mem2, 0);

  if (sched_deps_info->use_cselib)
    {
      /* We cannot call rtx_equal_for_cselib_p because the VALUEs might be
	 dangling at this point, since we never preserve them.  Instead we
	 canonicalize manually to get stable VALUEs out of hashing.  */
      if (GET_CODE (r1) == VALUE && CSELIB_VAL_PTR (r1))
	r1 = canonical_cselib_val (CSELIB_VAL_PTR (r1))->val_rtx;
      if (GET_CODE (r2) == VALUE && CSELIB_VAL_PTR (r2))
	r2 = canonical_cselib_val (CSELIB_VAL_PTR (r2))->val_rtx;
    }

  if (r1 == r2
      || (REG_P (r1) && REG_P (r2) && REGNO (r1) == REGNO (r2)))
    /* Again, MEMs are the same.  */
    return MIN_DEP_WEAK;
  else if ((REG_P (r1) && !REG_P (r2)) || (!REG_P (r1) && REG_P (r2)))
    /* Different addressing modes - a reason to be more speculative
       than usual.  */
    return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
  else
    /* We can't say anything about the dependence.  */
    return UNCERTAIN_DEP_WEAK;
}
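
/* A rough numeric picture (the actual constants live in sched-int.h):
   identical addresses return MIN_DEP_WEAK - "surely dependent, don't
   speculate" - while the mixed reg/non-reg case returns the midpoint
   between UNCERTAIN_DEP_WEAK and NO_DEP_WEAK, i.e. a better-than-uncertain
   chance that the two references do not alias.  */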

/* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
   This function can handle same INSN and ELEM (INSN == ELEM).
   It is a convenience wrapper.  */
static void
add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
{
  ds_t ds;
  bool internal;

  if (dep_type == REG_DEP_TRUE)
    ds = DEP_TRUE;
  else if (dep_type == REG_DEP_OUTPUT)
    ds = DEP_OUTPUT;
  else if (dep_type == REG_DEP_CONTROL)
    ds = DEP_CONTROL;
  else
    {
      gcc_assert (dep_type == REG_DEP_ANTI);
      ds = DEP_ANTI;
    }

  /* When add_dependence is called from inside sched-deps.c, we expect
     cur_insn to be non-null.  */
  internal = cur_insn != NULL;
  if (internal)
    gcc_assert (insn == cur_insn);
  else
    cur_insn = insn;

  note_dep (elem, ds);
  if (!internal)
    cur_insn = NULL;
}

/* Return weakness of speculative type TYPE in the dep_status DS,
   without checking to prevent ICEs on malformed input.  */
static dw_t
get_dep_weak_1 (ds_t ds, ds_t type)
{
  ds = ds & type;

  switch (type)
    {
    case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }

  return (dw_t) ds;
}
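
/* Layout sketch (illustrative): each speculative type occupies its own
   bit-field inside DS, so reading a weakness is a mask plus a shift,
   e.g. for TYPE == BEGIN_DATA the function above computes

     (dw_t) ((ds & BEGIN_DATA) >> BEGIN_DATA_BITS_OFFSET);

   set_dep_weak below performs the inverse mask-and-shift.  */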

/* Return weakness of speculative type TYPE in the dep_status DS.  */
dw_t
get_dep_weak (ds_t ds, ds_t type)
{
  dw_t dw = get_dep_weak_1 (ds, type);

  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
  return dw;
}

/* Return a dep_status that is identical to DS, except that speculative
   type TYPE has weakness DW.  */
ds_t
set_dep_weak (ds_t ds, ds_t type, dw_t dw)
{
  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);

  ds &= ~type;
  switch (type)
    {
    case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }
  return ds;
}

/* Return the join of two dep_statuses DS1 and DS2.
   If MAX_P is true then choose the greater probability,
   otherwise multiply probabilities.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
static ds_t
ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
{
  ds_t ds, t;

  gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));

  ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);

  t = FIRST_SPEC_TYPE;
  do
    {
      if ((ds1 & t) && !(ds2 & t))
	ds |= ds1 & t;
      else if (!(ds1 & t) && (ds2 & t))
	ds |= ds2 & t;
      else if ((ds1 & t) && (ds2 & t))
	{
	  dw_t dw1 = get_dep_weak (ds1, t);
	  dw_t dw2 = get_dep_weak (ds2, t);
	  ds_t dw;

	  if (!max_p)
	    {
	      dw = ((ds_t) dw1) * ((ds_t) dw2);
	      dw /= MAX_DEP_WEAK;
	      if (dw < MIN_DEP_WEAK)
		dw = MIN_DEP_WEAK;
	    }
	  else
	    {
	      if (dw1 >= dw2)
		dw = dw1;
	      else
		dw = dw2;
	    }

	  ds = set_dep_weak (ds, t, (dw_t) dw);
	}

      if (t == LAST_SPEC_TYPE)
	break;
      t <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  return ds;
}
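
/* A worked example with hypothetical weaknesses: if DS1 and DS2 both
   carry BEGIN_DATA with dw1 == MAX_DEP_WEAK / 2 and dw2 == MAX_DEP_WEAK / 4
   (success probabilities 1/2 and 1/4), then with !MAX_P the merged
   weakness is dw1 * dw2 / MAX_DEP_WEAK == MAX_DEP_WEAK / 8 (probability
   1/8), clamped to MIN_DEP_WEAK from below; with MAX_P it is simply
   MAX (dw1, dw2) == MAX_DEP_WEAK / 2.  */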

/* Return the join of two dep_statuses DS1 and DS2.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
ds_t
ds_merge (ds_t ds1, ds_t ds2)
{
  return ds_merge_1 (ds1, ds2, false);
}

/* Return the join of two dep_statuses DS1 and DS2.  */
ds_t
ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
{
  ds_t new_status = ds | ds2;

  if (new_status & SPECULATIVE)
    {
      if ((ds && !(ds & SPECULATIVE))
	  || (ds2 && !(ds2 & SPECULATIVE)))
	/* Then this dep can't be speculative.  */
	new_status &= ~SPECULATIVE;
      else
	{
	  /* Both are speculative.  Merge the probabilities.  */
	  if (mem1)
	    {
	      dw_t dw;

	      dw = estimate_dep_weak (mem1, mem2);
	      ds = set_dep_weak (ds, BEGIN_DATA, dw);
	    }

	  if (!ds)
	    new_status = ds2;
	  else if (!ds2)
	    new_status = ds;
	  else
	    new_status = ds_merge (ds2, ds);
	}
    }

  return new_status;
}

/* Return the join of DS1 and DS2.  Use maximum instead of multiplying
   probabilities.  */
ds_t
ds_max_merge (ds_t ds1, ds_t ds2)
{
  if (ds1 == 0 && ds2 == 0)
    return 0;

  if (ds1 == 0 && ds2 != 0)
    return ds2;

  if (ds1 != 0 && ds2 == 0)
    return ds1;

  return ds_merge_1 (ds1, ds2, true);
}

/* Return the probability of speculation success for the speculation
   status DS.  */
dw_t
ds_weak (ds_t ds)
{
  ds_t res = 1, dt;
  int n = 0;

  dt = FIRST_SPEC_TYPE;
  do
    {
      if (ds & dt)
	{
	  res *= (ds_t) get_dep_weak (ds, dt);
	  n++;
	}

      if (dt == LAST_SPEC_TYPE)
	break;
      dt <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  gcc_assert (n);
  while (--n)
    res /= MAX_DEP_WEAK;

  if (res < MIN_DEP_WEAK)
    res = MIN_DEP_WEAK;

  gcc_assert (res <= MAX_DEP_WEAK);

  return (dw_t) res;
}
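
/* For instance (hypothetical status): a DS carrying BEGIN_DATA with
   weakness W1 and BEGIN_CONTROL with weakness W2 yields
   W1 * W2 / MAX_DEP_WEAK - the probability that both speculations
   succeed - again clamped to MIN_DEP_WEAK from below.  */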

/* Return a dep status that contains all speculation types of DS.  */
ds_t
ds_get_speculation_types (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds |= BEGIN_DATA;
  if (ds & BE_IN_DATA)
    ds |= BE_IN_DATA;
  if (ds & BEGIN_CONTROL)
    ds |= BEGIN_CONTROL;
  if (ds & BE_IN_CONTROL)
    ds |= BE_IN_CONTROL;

  return ds & SPECULATIVE;
}

/* Return a dep status that contains maximal weakness for each speculation
   type present in DS.  */
ds_t
ds_get_max_dep_weak (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
  if (ds & BE_IN_DATA)
    ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
  if (ds & BEGIN_CONTROL)
    ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
  if (ds & BE_IN_CONTROL)
    ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);

  return ds;
}

/* Dump information about the dependence status S.  */
static void
dump_ds (FILE *f, ds_t s)
{
  fprintf (f, "{");

  if (s & BEGIN_DATA)
    fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
  if (s & BE_IN_DATA)
    fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
  if (s & BEGIN_CONTROL)
    fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
  if (s & BE_IN_CONTROL)
    fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));

  if (s & HARD_DEP)
    fprintf (f, "HARD_DEP; ");

  if (s & DEP_TRUE)
    fprintf (f, "DEP_TRUE; ");
  if (s & DEP_OUTPUT)
    fprintf (f, "DEP_OUTPUT; ");
  if (s & DEP_ANTI)
    fprintf (f, "DEP_ANTI; ");
  if (s & DEP_CONTROL)
    fprintf (f, "DEP_CONTROL; ");

  fprintf (f, "}");
}

DEBUG_FUNCTION void
debug_ds (ds_t s)
{
  dump_ds (stderr, s);
  fprintf (stderr, "\n");
}

/* Verify that dependence type and status are consistent.
   If RELAXED_P is true, then skip dep_weakness checks.  */
static void
check_dep (dep_t dep, bool relaxed_p)
{
  enum reg_note dt = DEP_TYPE (dep);
  ds_t ds = DEP_STATUS (dep);

  gcc_assert (DEP_PRO (dep) != DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      gcc_assert (ds == 0);
      return;
    }

  /* Check that dependence type contains the same bits as the status.  */
  if (dt == REG_DEP_TRUE)
    gcc_assert (ds & DEP_TRUE);
  else if (dt == REG_DEP_OUTPUT)
    gcc_assert ((ds & DEP_OUTPUT)
		&& !(ds & DEP_TRUE));
  else if (dt == REG_DEP_ANTI)
    gcc_assert ((ds & DEP_ANTI)
		&& !(ds & (DEP_OUTPUT | DEP_TRUE)));
  else
    gcc_assert (dt == REG_DEP_CONTROL
		&& (ds & DEP_CONTROL)
		&& !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));

  /* HARD_DEP cannot appear in dep_status of a link.  */
  gcc_assert (!(ds & HARD_DEP));

  /* Check that dependence status is set correctly when speculation is not
     supported.  */
  if (!sched_deps_info->generate_spec_deps)
    gcc_assert (!(ds & SPECULATIVE));
  else if (ds & SPECULATIVE)
    {
      if (!relaxed_p)
	{
	  ds_t type = FIRST_SPEC_TYPE;

	  /* Check that dependence weakness is in proper range.  */
	  do
	    {
	      if (ds & type)
		get_dep_weak (ds, type);

	      if (type == LAST_SPEC_TYPE)
		break;
	      type <<= SPEC_TYPE_SHIFT;
	    }
	  while (1);
	}

      if (ds & BEGIN_SPEC)
	{
	  /* Only true dependence can be data speculative.  */
	  if (ds & BEGIN_DATA)
	    gcc_assert (ds & DEP_TRUE);

	  /* Control dependencies in the insn scheduler are represented by
	     anti-dependencies, therefore only anti dependence can be
	     control speculative.  */
	  if (ds & BEGIN_CONTROL)
	    gcc_assert (ds & DEP_ANTI);
	}
      else
	{
	  /* Subsequent speculations should resolve true dependencies.  */
	  gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
	}

      /* Check that true and anti dependencies can't have other speculative
	 statuses.  */
      if (ds & DEP_TRUE)
	gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
      /* An output dependence can't be speculative at all.  */
      gcc_assert (!(ds & DEP_OUTPUT));
      if (ds & DEP_ANTI)
	gcc_assert (ds & BEGIN_CONTROL);
    }
}

/* The following code discovers opportunities to switch a memory reference
   and an increment by modifying the address.  We ensure that this is done
   only for dependencies whose sole purpose is to show a single register
   dependence (using DEP_NONREG and DEP_MULTIPLE), and that every memory
   instruction involved is subject to only one dep that can cause a pattern
   change.

   When we discover a suitable dependency, we fill in the dep_replacement
   structure to show how to modify the memory reference.  */

/* Holds information about a pair of memory reference and register increment
   insns which depend on each other, but could possibly be interchanged.  */
struct mem_inc_info
{
  rtx_insn *inc_insn;
  rtx_insn *mem_insn;

  rtx *mem_loc;
  /* A register occurring in the memory address for which we wish to break
     the dependence.  This must be identical to the destination register of
     the increment.  */
  rtx mem_reg0;
  /* Any kind of index that is added to that register.  */
  rtx mem_index;
  /* The constant offset used in the memory address.  */
  HOST_WIDE_INT mem_constant;
  /* The constant added in the increment insn.  Negated if the increment is
     after the memory address.  */
  HOST_WIDE_INT inc_constant;
  /* The source register used in the increment.  May be different from mem_reg0
     if the increment occurs before the memory address.  */
  rtx inc_input;
};
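
/* A sketch of the transformation this data supports (hypothetical RTL,
   register numbers made up).  Given

     mem_insn:  (set (reg:SI 1) (mem:SI (plus:SI (reg:SI 0) (const_int 4))))
     inc_insn:  (set (reg:SI 0) (plus:SI (reg:SI 0) (const_int 8)))

   with the increment after the memory reference, inc_constant is stored
   negated (-8), so the replacement address becomes
   (plus:SI (reg:SI 0) (const_int -4)) and the load can then be scheduled
   after the increment.  */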

/* Verify that the memory location described in MII can be replaced with
   one using NEW_ADDR.  Return the new memory reference or NULL_RTX.  The
   insn remains unchanged by this function.  */

static rtx
attempt_change (struct mem_inc_info *mii, rtx new_addr)
{
  rtx mem = *mii->mem_loc;
  rtx new_mem;

  /* Jump through a lot of hoops to keep the attributes up to date.  We
     do not want to call one of the change address variants that take
     an offset even though we know the offset in many cases.  These
     assume you are changing where the address is pointing by the
     offset.  */
  new_mem = replace_equiv_address_nv (mem, new_addr);
  if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
    {
      if (sched_verbose >= 5)
	fprintf (sched_dump, "validation failure\n");
      return NULL_RTX;
    }

  /* Put back the old one.  */
  validate_change (mii->mem_insn, mii->mem_loc, mem, 0);

  return new_mem;
}

/* Return true if INSN is of a form "a = b op c" where a and b are
   regs.  op is + if c is a reg and +|- if c is a const.  Fill in
   information in MII about what is found.
   BEFORE_MEM indicates whether the increment is found before or after
   a corresponding memory reference.  */

static bool
parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
{
  rtx pat = single_set (insn);
  rtx src, cst;
  bool regs_equal;

  if (RTX_FRAME_RELATED_P (insn) || !pat)
    return false;

  /* Do not allow breaking data dependencies for insns that are marked
     with REG_STACK_CHECK.  */
  if (find_reg_note (insn, REG_STACK_CHECK, NULL))
    return false;

  /* Result must be single reg.  */
  if (!REG_P (SET_DEST (pat)))
    return false;

  if (GET_CODE (SET_SRC (pat)) != PLUS)
    return false;

  mii->inc_insn = insn;
  src = SET_SRC (pat);
  mii->inc_input = XEXP (src, 0);

  if (!REG_P (XEXP (src, 0)))
    return false;

  if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
    return false;

  cst = XEXP (src, 1);
  if (!CONST_INT_P (cst))
    return false;
  mii->inc_constant = INTVAL (cst);

  regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);

  if (!before_mem)
    {
      mii->inc_constant = -mii->inc_constant;
      if (!regs_equal)
	return false;
    }

  if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
    {
      /* Note that the sign has already been reversed for !before_mem.  */
      if (STACK_GROWS_DOWNWARD)
	return mii->inc_constant > 0;
      else
	return mii->inc_constant < 0;
    }
  return true;
}

/* Once a suitable mem reference has been found and the corresponding data
   in MII has been filled in, this function is called to find a suitable
   add or inc insn involving the register we found in the memory
   reference.  */

static bool
find_inc (struct mem_inc_info *mii, bool backwards)
{
  sd_iterator_def sd_it;
  dep_t dep;

  sd_it = sd_iterator_start (mii->mem_insn,
			     backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
  while (sd_iterator_cond (&sd_it, &dep))
    {
      dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
      rtx_insn *pro = DEP_PRO (dep);
      rtx_insn *con = DEP_CON (dep);
      rtx_insn *inc_cand = backwards ? pro : con;
      if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
	goto next;
      if (parse_add_or_inc (mii, inc_cand, backwards))
	{
	  struct dep_replacement *desc;
	  df_ref def;
	  rtx newaddr, newmem;

	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
		     INSN_UID (mii->mem_insn), INSN_UID (inc_cand));

	  /* We must ensure that none of the operands of the inc
	     instruction are assigned to by the mem insn.  */
	  FOR_EACH_INSN_DEF (def, mii->mem_insn)
	    if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
		|| reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
	      {
		if (sched_verbose >= 5)
		  fprintf (sched_dump,
			   "inc conflicts with store failure.\n");
		goto next;
	      }

	  newaddr = mii->inc_input;
	  if (mii->mem_index != NULL_RTX)
	    newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
				    mii->mem_index);
	  newaddr = plus_constant (GET_MODE (newaddr), newaddr,
				   mii->mem_constant + mii->inc_constant);
	  newmem = attempt_change (mii, newaddr);
	  if (newmem == NULL_RTX)
	    goto next;
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "successful address replacement\n");
	  desc = XCNEW (struct dep_replacement);
	  DEP_REPLACE (dep) = desc;
	  desc->loc = mii->mem_loc;
	  desc->newval = newmem;
	  desc->orig = *desc->loc;
	  desc->insn = mii->mem_insn;
	  move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
			 INSN_SPEC_BACK_DEPS (con));
	  if (backwards)
	    {
	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
		add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
				  REG_DEP_TRUE);
	    }
	  else
	    {
	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
		add_dependence_1 (DEP_CON (dep), mii->mem_insn,
				  REG_DEP_ANTI);
	    }
	  return true;
	}
    next:
      sd_iterator_next (&sd_it);
    }
  return false;
}

/* A recursive function that walks ADDRESS_OF_X to find memory references
   which could be modified during scheduling.  We call find_inc for each
   one we find that has a recognizable form.  MII holds information about
   the pair of memory/increment instructions.
   We ensure that every instruction with a memory reference (which will be
   the location of the replacement) is assigned at most one breakable
   dependency.  */

static bool
find_mem (struct mem_inc_info *mii, rtx *address_of_x)
{
  rtx x = *address_of_x;
  enum rtx_code code = GET_CODE (x);
  const char *const fmt = GET_RTX_FORMAT (code);
  int i;

  if (code == MEM)
    {
      rtx reg0 = XEXP (x, 0);

      mii->mem_loc = address_of_x;
      mii->mem_index = NULL_RTX;
      mii->mem_constant = 0;
      if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
	{
	  mii->mem_constant = INTVAL (XEXP (reg0, 1));
	  reg0 = XEXP (reg0, 0);
	}
      if (GET_CODE (reg0) == PLUS)
	{
	  mii->mem_index = XEXP (reg0, 1);
	  reg0 = XEXP (reg0, 0);
	}
      if (REG_P (reg0))
	{
	  df_ref use;
	  int occurrences = 0;

	  /* Make sure this reg appears only once in this insn.  Can't use
	     count_occurrences since that only works for pseudos.  */
	  FOR_EACH_INSN_USE (use, mii->mem_insn)
	    if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
	      if (++occurrences > 1)
		{
		  if (sched_verbose >= 5)
		    fprintf (sched_dump, "mem count failure\n");
		  return false;
		}

	  mii->mem_reg0 = reg0;
	  return find_inc (mii, true) || find_inc (mii, false);
	}
      return false;
    }

  if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
    {
      /* If REG occurs inside a MEM used in a bit-field reference,
	 that is unacceptable.  */
      return false;
    }

  /* Time for some deep diving.  */
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (find_mem (mii, &XEXP (x, i)))
	    return true;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (find_mem (mii, &XVECEXP (x, i, j)))
	      return true;
	}
    }
  return false;
}


/* Examine the instructions between HEAD and TAIL and try to find
   dependencies that can be broken by modifying one of the patterns.  */

void
find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn, *next_tail = NEXT_INSN (tail);
  int success_in_block = 0;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      struct mem_inc_info mii;

      if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
	continue;

      mii.mem_insn = insn;
      if (find_mem (&mii, &PATTERN (insn)))
	success_in_block++;
    }
  if (success_in_block && sched_verbose >= 5)
    fprintf (sched_dump, "%d candidates for address modification found.\n",
	     success_in_block);
}

#endif /* INSN_SCHEDULING */