/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992-2018 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "insn-config.h"
#include "regs.h"
#include "memmodel.h"
#include "ira.h"
#include "ira-int.h"
#include "insn-attr.h"
#include "cfgbuild.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"

#ifdef INSN_SCHEDULING

/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
vec<haifa_deps_insn_data_def> h_d_i_d = vNULL;
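
/* The vector is indexed by INSN_LUID; entries are assumed to be accessed
   through the HDID accessor macro defined in sched-int.h.  */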

/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  if (ds & DEP_CONTROL)
    return REG_DEP_CONTROL;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}

/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  switch (dk)
    {
    case REG_DEP_TRUE:
      return DEP_TRUE;

    case REG_DEP_OUTPUT:
      return DEP_OUTPUT;

    case REG_DEP_CONTROL:
      return DEP_CONTROL;

    default:
      gcc_assert (dk == REG_DEP_ANTI);
      return DEP_ANTI;
    }
}
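
/* Note that ds_to_dk and dk_to_ds are inverses for the four plain
   dependence kinds: ds_to_dk (dk_to_ds (K)) == K.  For a ds_t with
   several type bits set, ds_to_dk returns only the major one.  */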

/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
void
init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
  DEP_COST (dep) = UNKNOWN_DEP_COST;
  DEP_NONREG (dep) = 0;
  DEP_MULTIPLE (dep) = 0;
  DEP_REPLACE (dep) = NULL;
}

/* Init DEP with the arguments.
   While most of the scheduler (including targets) only needs the major type
   of the dependency, it is convenient to hide the full dep_status from it.  */
void
init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);
  else
    ds = 0;

  init_dep_1 (dep, pro, con, kind, ds);
}

/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}

static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE	\
		      | DUMP_DEP_STATUS)

/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
	{
	case REG_DEP_TRUE:
	  t = 't';
	  break;

	case REG_DEP_OUTPUT:
	  t = 'o';
	  break;

	case REG_DEP_CONTROL:
	  t = 'c';
	  break;

	case REG_DEP_ANTI:
	  t = 'a';
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
	dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}

/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}

/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */

static inline bool
depl_on_debug_p (dep_link_t dep)
{
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
}

/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXT_P.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
	      && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}
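
/* Keeping a pointer to the previous link's next field (PREV_NEXT_P)
   instead of a pointer to the previous link itself lets attach_dep_link
   and detach_dep_link handle insertion and removal at the head of a list
   exactly like in the middle: DEPS_LIST_FIRST simply plays the role of
   another next field.  */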

/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);
}

/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}

/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);
}

/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}
/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}

/* Pool to hold all dependency nodes (dep_node_t).  */
static object_allocator<_dep_node> *dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;

/* Create a dep_node.  */
static dep_node_t
create_dep_node (void)
{
  dep_node_t n = dn_pool->allocate ();
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  ++dn_pool_diff;

  return n;
}
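
/* A dep_node bundles the dependence itself (DEP_NODE_DEP) with two
   embedded links: DEP_NODE_BACK lives in the consumer's backward list
   and DEP_NODE_FORW in the producer's forward list, so one allocation
   represents the dependence in both directions.  */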

/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));

  --dn_pool_diff;

  dn_pool->remove (n);
}

/* Pool to hold dependencies lists (deps_list_t).  */
static object_allocator<_deps_list> *dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;

/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}

/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = dl_pool->allocate ();

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}

/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  dl_pool->remove (l);
}
/* Return true if there are no dep_nodes or deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to the pool.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}
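
/* dn_pool_diff and dl_pool_diff are live-object counters: incremented in
   create_dep_node / create_deps_list and decremented in delete_dep_node /
   free_deps_list, so a nonzero value after scheduling a region indicates
   a leaked node or list.  */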

/* Remove all elements from L.  */
static void
clear_deps_list (deps_list_t l)
{
  do
    {
      dep_link_t link = DEPS_LIST_FIRST (l);

      if (link == NULL)
	break;

      remove_from_deps_list (link, l);
    }
  while (1);
}

/* Decide whether a dependency should be treated as a hard or a speculative
   dependency.  */
static bool
dep_spec_p (dep_t dep)
{
  if (current_sched_info->flags & DO_SPECULATION)
    {
      if (DEP_STATUS (dep) & SPECULATIVE)
	return true;
    }
  if (current_sched_info->flags & DO_PREDICATION)
    {
      if (DEP_TYPE (dep) == REG_DEP_CONTROL)
	return true;
    }
  if (DEP_REPLACE (dep) != NULL)
    return true;
  return false;
}

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static regset reg_pending_control_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;

/* Hard registers implicitly clobbered or used (or that may be implicitly
   clobbered or used) by the currently analyzed insn.  For example, an
   insn may have a constraint that requires a single register class:
   even if no hard register currently appears in the insn, the required
   hard register will appear in it after the reload pass, because the
   constraint demands it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;

/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmap for the true-dependency cache is
   allocated, then the remaining caches are allocated as well.  */
static bitmap_head *true_dependency_cache = NULL;
static bitmap_head *output_dependency_cache = NULL;
static bitmap_head *anti_dependency_cache = NULL;
static bitmap_head *control_dependency_cache = NULL;
static bitmap_head *spec_dependency_cache = NULL;
static int cache_size;
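
/* Each cache above is an array of bitmaps, indexed by the luid of the
   consumer insn; bit ELEM_LUID in true_dependency_cache[INSN_LUID] means
   that a true dependence of insn INSN_LUID on insn ELEM_LUID has been
   recorded.  CACHE_SIZE is the number of entries allocated in each
   array.  */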

/* True if we should mark added dependencies as non-register deps.  */
static bool mark_as_hard;

static int deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
				 enum reg_note, bool);
static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
					  rtx_insn_list **, int, enum reg_note,
					  bool);
static void delete_all_dependences (rtx_insn *);
static void chain_to_prev_insn (rtx_insn *);

static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);

static bool sched_has_condition_p (const rtx_insn *);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
							  rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

static void check_dep (dep_t, bool);


/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
	addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}


/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
	return 0;

      if (rev)
	*rev = true;
      return cond;
    }

  return 0;
}
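
/* For example, given a conditional branch
       (set (pc) (if_then_else (ne (reg) (const_int 0))
                               (label_ref ...) (pc)))
   the comparison is returned as-is, whereas with the arms swapped
       (set (pc) (if_then_else (ne (reg) (const_int 0))
                               (pc) (label_ref ...)))
   the same comparison is returned with *REV set, because the branch is
   taken when the condition is false.  */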

/* Return the condition under which INSN does not execute (i.e.  the
   not-taken condition for a conditional branch), or NULL if we cannot
   find such a condition.  The caller should make a copy of the condition
   before using it.  */
rtx
sched_get_reverse_condition_uncached (const rtx_insn *insn)
{
  bool rev;
  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
  if (cond == NULL_RTX)
    return cond;
  if (!rev)
    {
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
      cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
			     XEXP (cond, 0),
			     XEXP (cond, 1));
    }
  return cond;
}

/* Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
static rtx
sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
{
  bool tmp;

  if (INSN_LUID (insn) == 0)
    return sched_get_condition_with_rev_uncached (insn, rev);

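  /* INSN_CACHED_COND equal to const_true_rtx is a sentinel, set below,
     meaning "this insn is known to have no condition".  */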
  if (INSN_CACHED_COND (insn) == const_true_rtx)
    return NULL_RTX;

  if (INSN_CACHED_COND (insn) != NULL_RTX)
    {
      if (rev)
	*rev = INSN_REVERSE_COND (insn);
      return INSN_CACHED_COND (insn);
    }

  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
  INSN_REVERSE_COND (insn) = tmp;

  if (INSN_CACHED_COND (insn) == NULL_RTX)
    {
      INSN_CACHED_COND (insn) = const_true_rtx;
      return NULL_RTX;
    }

  if (rev)
    *rev = INSN_REVERSE_COND (insn);
  return INSN_CACHED_COND (insn);
}

/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const rtx_insn *insn)
{
  return !! sched_get_condition_with_rev (insn, NULL);
}



/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
static int
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) ==
	  (rev1==rev2
	  ? reversed_comparison_code (cond2, NULL)
	  : GET_CODE (cond2))
      && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
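
/* For example, (eq (reg 100) (const_int 0)) and (ne (reg 100) (const_int 0)),
   both executed non-reversed, are mutually exclusive, as are two identical
   comparisons of which exactly one is reversed.  */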

/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
	return true;
    }
  return false;
}


/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* The following instructions, which depend on a speculatively scheduled
       instruction, cannot be speculatively scheduled along with it.  */
    {
      if (may_trap_or_fault_p (PATTERN (insn)))
	/* If an instruction might fault, it cannot be speculatively scheduled.
	   For control speculation it's obvious why and for data speculation
	   it's because the insn might get wrong input if speculation
	   wasn't successful.  */
	return false;

      if ((ds & BE_IN_DATA)
	  && sched_has_condition_p (insn))
	/* If this is a predicated instruction, then it cannot be
	   speculatively scheduled.  See PR35659.  */
	return false;
    }

  return true;
}

/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of the returned list [through LIST_PTR] from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
	      deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}
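
/* The iterator thus visits the lists in a fixed order: hard backward deps,
   speculative backward deps, forward deps, resolved backward deps, and
   finally resolved forward deps.  */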

/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (list)
	size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}

/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */

bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
	return false;
    }

  return true;
}

/* Initialize data for INSN.  */
void
sd_init_insn (rtx_insn *insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */
}

/* Free data for INSN.  */
void
sd_finish_insn (rtx_insn *insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
			      sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
	if (DEP_PRO (dep) == pro)
	  {
	    found_p = true;
	    break;
	  }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
	if (DEP_CON (dep) == con)
	  {
	    found_p = true;
	    break;
	  }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
	*sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches, if available, to check whether any dependency
   between PRO and CON is present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none is found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}
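
/* Note that the caches can only prove the absence of a dependence; once any
   of the bits is set, the lists still have to be walked to locate the dep_t
   itself.  */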

/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks whether producer and consumer are the same
   insn and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
        /* INSN has an internal dependence, which we can't overcome.  */
        HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}

/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
	      && output_dependency_cache != NULL
	      && anti_dependency_cache != NULL
	      && control_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_ANTI;
      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_CONTROL;
      else
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
	/* DEP does not add anything to the existing dependence.  */
	return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_ANTI;
      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_CONTROL;

      if (present_dep_types == 0)
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
	{
	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
	      == present_dep_types)
	    /* DEP does not add anything to the existing dependence.  */
	    return DEP_PRESENT;
	}
      else
	{
	  /* Only true dependencies can be data speculative and
	     only anti dependencies can be control speculative.  */
	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
		      == present_dep_types);

	  /* if (DEP is SPECULATIVE) then
	     ..we should update DEP_STATUS
	     else
	     ..we should reset existing dep to non-speculative.  */
	}
    }

  return DEP_CHANGED;
}

/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
	{
	case REG_DEP_TRUE:
	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_OUTPUT:
	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_CONTROL)
	bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
	{
	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
	}
    }
}

/* The type of dependence DEP has changed from OLD_TYPE.  Update the
   dependency caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear the corresponding cache entry because the type of the link
     may have changed.  Keep them if we use_deps_list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
	{
	case REG_DEP_OUTPUT:
	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  set_dependency_caches (dep);
}

/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
		      INSN_LUID (elem));
}

/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case a
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
	    rtx mem1 ATTRIBUTE_UNUSED,
	    rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  bool was_spec = dep_spec_p (dep);

  DEP_NONREG (dep) |= DEP_NONREG (new_dep);
  DEP_MULTIPLE (dep) = 1;

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	{
	  /* Either existing dep or a dep we're adding or both are
	     speculative.  */
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    new_status &= ~SPECULATIVE;
	  else
	    {
	      /* Both are speculative.  Merge probabilities.  */
	      if (mem1 != NULL)
		{
		  dw_t dw;

		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
		}

	      new_status = ds_merge (dep_status, ds);
	    }
	}

      ds = new_status;

      if (dep_status != ds)
	{
	  DEP_STATUS (dep) = ds;
	  res = DEP_CHANGED;
	}
    }

  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  */
    change_spec_dep_to_hard (sd_it);

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}

/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

  if (flag_checking)
    check_dep (new_dep, mem1 != NULL);

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
	{
	case DEP_PRESENT:
	  dep_t present_dep;
	  sd_iterator_def sd_it;

	  present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						      DEP_CON (new_dep),
						      resolved_p, &sd_it);
	  DEP_MULTIPLE (present_dep) = 1;
	  return DEP_PRESENT;

	case DEP_CHANGED:
	  maybe_present_p = true;
	  present_p = true;
	  break;

	case DEP_CREATED:
	  maybe_present_p = false;
	  present_p = false;
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
	/* We didn't find a dep, so it shouldn't be present in the cache.  */
	gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}

/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
			 deps_list_t *back_list_ptr,
			 deps_list_t *forw_list_ptr)
{
  rtx_insn *con = DEP_CON (dep);

  if (!resolved_p)
    {
      if (dep_spec_p (dep))
	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
	*back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}

/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION) == 0
      || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

  if (flag_checking)
    check_dep (dep, false);

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}
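
/* A typical way to create a dependence from scratch (cf. sd_copy_back_deps
   below) is to initialize a dep_def on the stack and let sd_add_dep copy
   it into a freshly allocated dep_node:

       dep_def _dep, *dep = &_dep;

       init_dep (dep, producer, consumer, REG_DEP_TRUE);
       sd_add_dep (dep, false);  */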

/* Add or update a backward dependence described by DEP.
   This function is a convenience wrapper around add_or_update_dep_1.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}

/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
		 INSN_RESOLVED_FORW_DEPS (pro));
}

/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state.  */
void
sd_unresolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_SPEC_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_HARD_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
		 INSN_FORW_DEPS (pro));
}

/* Make TO depend on all of FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}

/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}

/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)

/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
	{
	  dump_dep (dump, dep, dump_dep_flags | all);
	  fprintf (dump, " ");
	}
    }
}

/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
void
sd_debug_lists (rtx insn, sd_list_types_def types)
{
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
}

/* A wrapper around add_dependence_1, to add a dependence of CON on
   PRO, with type DEP_TYPE.  This function implements special handling
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
   the type to REG_DEP_ANTI if we can determine that predication is
   impossible; otherwise we add additional true dependencies on the
   INSN_COND_DEPS list of the jump (which PRO must be).  */
void
add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
{
  if (dep_type == REG_DEP_CONTROL
      && !(current_sched_info->flags & DO_PREDICATION))
    dep_type = REG_DEP_ANTI;

  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
     so we must also make the insn dependent on the setter of the
     condition.  */
  if (dep_type == REG_DEP_CONTROL)
    {
      rtx_insn *real_pro = pro;
      rtx_insn *other = real_insn_for_shadow (real_pro);
      rtx cond;

      if (other != NULL_RTX)
	real_pro = other;
      cond = sched_get_reverse_condition_uncached (real_pro);
      /* Verify that the insn does not use a different value in
	 the condition register than the one that was present at
	 the jump.  */
      if (cond == NULL_RTX)
	dep_type = REG_DEP_ANTI;
      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
	{
	  HARD_REG_SET uses;
	  CLEAR_HARD_REG_SET (uses);
	  note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
	  if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
	    dep_type = REG_DEP_ANTI;
	}
      if (dep_type == REG_DEP_CONTROL)
	{
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "making DEP_CONTROL for %d\n",
		     INSN_UID (real_pro));
	  add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
			       REG_DEP_TRUE, false);
	}
    }

  add_dependence_1 (con, pro, dep_type);
}

/* A convenience wrapper to operate on an entire list.  HARD should be
   true if DEP_NONREG should be set on newly created dependencies.  */

static void
add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
		     enum reg_note dep_type, bool hard)
{
  mark_as_hard = hard;
  for (; list; list = list->next ())
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
	add_dependence (insn, list->insn (), dep_type);
    }
  mark_as_hard = false;
}

/* Similar, but free *LISTP at the same time, when the context
   is not readonly.  HARD should be true if DEP_NONREG should be set on
   newly created dependencies.  */

static void
add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
			      rtx_insn_list **listp,
                              int uncond, enum reg_note dep_type, bool hard)
{
  add_dependence_list (insn, *listp, uncond, dep_type, hard);

  /* We don't want to short-circuit dependencies involving debug
     insns, because they may cause actual dependencies to be
     disregarded.  */
  if (deps->readonly || DEBUG_INSN_P (insn))
    return;

  free_INSN_LIST_list (listp);
}

/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */

static int
remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
{
  int removed = 0;

  while (*listp)
    {
      if ((*listp)->insn () == insn)
        {
          remove_free_INSN_LIST_node (listp);
          removed++;
          continue;
        }

      listp = (rtx_insn_list **)&XEXP (*listp, 1);
    }

  return removed;
}

/* Same as above, but process two lists at once.  */
static int
remove_from_both_dependence_lists (rtx_insn *insn,
				   rtx_insn_list **listp,
				   rtx_expr_list **exprp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
        {
          remove_free_INSN_LIST_node (listp);
          remove_free_EXPR_LIST_node (exprp);
          removed++;
          continue;
        }

      listp = (rtx_insn_list **)&XEXP (*listp, 1);
      exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
    }

  return removed;
}

/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The loop below could be optimized to clear the caches and back_deps
     in one call, but that would provoke duplication of code from
     sd_delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}

/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
chain_to_prev_insn (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx_insn *prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *i = insn;
      rtx_insn *pro = DEP_PRO (dep);

      do
	{
	  i = prev_nonnote_insn (i);

	  if (pro == i)
	    goto next_link;
	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
	add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
1694 
1695 /* Process an insn's memory dependencies.  There are four kinds of
1696    dependencies:
1697 
1698    (0) read dependence: read follows read
1699    (1) true dependence: read follows write
1700    (2) output dependence: write follows write
1701    (3) anti dependence: write follows read
1702 
1703    We are careful to build only dependencies which actually exist, and
1704    use transitivity to avoid building too many links.  */
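/* For instance, in the hypothetical sequence

       (1) r1 = MEM[a]        (read)
       (2) MEM[a] = r2        (write)
       (3) MEM[a] = r3        (write)
       (4) r4 = MEM[a]        (read)

   (2) anti-depends on (1), (3) output-depends on (2), and (4) truly
   depends on (3); transitivity through those links already orders (4)
   after (2), so no separate (2)->(4) edge has to be built.  */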
1705 
1706 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1707    The MEM is a memory reference contained within INSN, which we are saving
1708    so that we can do memory aliasing on it.  */
1709 
1710 static void
1711 add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
1712 			 rtx_insn *insn, rtx mem)
1713 {
1714   rtx_insn_list **insn_list;
1715   rtx_insn_list *insn_node;
1716   rtx_expr_list **mem_list;
1717   rtx_expr_list *mem_node;
1718 
1719   gcc_assert (!deps->readonly);
1720   if (read_p)
1721     {
1722       insn_list = &deps->pending_read_insns;
1723       mem_list = &deps->pending_read_mems;
1724       if (!DEBUG_INSN_P (insn))
1725 	deps->pending_read_list_length++;
1726     }
1727   else
1728     {
1729       insn_list = &deps->pending_write_insns;
1730       mem_list = &deps->pending_write_mems;
1731       deps->pending_write_list_length++;
1732     }
1733 
1734   insn_node = alloc_INSN_LIST (insn, *insn_list);
1735   *insn_list = insn_node;
1736 
1737   if (sched_deps_info->use_cselib)
1738     {
1739       mem = shallow_copy_rtx (mem);
1740       XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
1741 							GET_MODE (mem), insn);
1742     }
1743   mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1744   *mem_list = mem_node;
1745 }
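/* Note that when cselib is enabled the saved MEM is a shallow copy
   whose address has been replaced by cselib values, so later aliasing
   queries against the pending lists compare canonicalized addresses
   rather than the original address rtx.  */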
1746 
1747 /* Make a dependency between every memory reference on the pending lists
1748    and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
1749    dependencies for a read operation, similarly with FOR_WRITE.  */
1750 
1751 static void
1752 flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
1753 		     int for_write)
1754 {
1755   if (for_write)
1756     {
1757       add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1758                                     1, REG_DEP_ANTI, true);
1759       if (!deps->readonly)
1760         {
1761           free_EXPR_LIST_list (&deps->pending_read_mems);
1762           deps->pending_read_list_length = 0;
1763         }
1764     }
1765 
1766   add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1767 				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1768 				true);
1769 
1770   add_dependence_list_and_free (deps, insn,
1771                                 &deps->last_pending_memory_flush, 1,
1772                                 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1773 				true);
1774 
1775   add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
1776 				REG_DEP_ANTI, true);
1777 
1778   if (DEBUG_INSN_P (insn))
1779     {
1780       if (for_write)
1781 	free_INSN_LIST_list (&deps->pending_read_insns);
1782       free_INSN_LIST_list (&deps->pending_write_insns);
1783       free_INSN_LIST_list (&deps->last_pending_memory_flush);
1784       free_INSN_LIST_list (&deps->pending_jump_insns);
1785     }
1786 
1787   if (!deps->readonly)
1788     {
1789       free_EXPR_LIST_list (&deps->pending_write_mems);
1790       deps->pending_write_list_length = 0;
1791 
1792       deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1793       deps->pending_flush_length = 1;
1794     }
1795   mark_as_hard = false;
1796 }
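/* After a flush the pending lists are empty and INSN becomes the sole
   entry of last_pending_memory_flush, so each later memory reference
   needs at most one dependence on INSN instead of one per flushed
   access; this is what keeps the pending lists bounded by
   MAX_PENDING_LIST_LENGTH.  */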
1797 
1798 /* Instruction whose dependencies we are analyzing.  */
1799 static rtx_insn *cur_insn = NULL;
1800 
1801 /* Implement hooks for haifa scheduler.  */
1802 
1803 static void
1804 haifa_start_insn (rtx_insn *insn)
1805 {
1806   gcc_assert (insn && !cur_insn);
1807 
1808   cur_insn = insn;
1809 }
1810 
1811 static void
1812 haifa_finish_insn (void)
1813 {
1814   cur_insn = NULL;
1815 }
1816 
1817 void
1818 haifa_note_reg_set (int regno)
1819 {
1820   SET_REGNO_REG_SET (reg_pending_sets, regno);
1821 }
1822 
1823 void
1824 haifa_note_reg_clobber (int regno)
1825 {
1826   SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1827 }
1828 
1829 void
1830 haifa_note_reg_use (int regno)
1831 {
1832   SET_REGNO_REG_SET (reg_pending_uses, regno);
1833 }
1834 
1835 static void
1836 haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
1837 {
1838   if (!(ds & SPECULATIVE))
1839     {
1840       mem = NULL_RTX;
1841       pending_mem = NULL_RTX;
1842     }
1843   else
1844     gcc_assert (ds & BEGIN_DATA);
1845 
1846   {
1847     dep_def _dep, *dep = &_dep;
1848 
1849     init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1850                 current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
1851     DEP_NONREG (dep) = 1;
1852     maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1853   }
1854 
1855 }
1856 
1857 static void
1858 haifa_note_dep (rtx_insn *elem, ds_t ds)
1859 {
1860   dep_def _dep;
1861   dep_t dep = &_dep;
1862 
1863   init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1864   if (mark_as_hard)
1865     DEP_NONREG (dep) = 1;
1866   maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1867 }
1868 
1869 static void
1870 note_reg_use (int r)
1871 {
1872   if (sched_deps_info->note_reg_use)
1873     sched_deps_info->note_reg_use (r);
1874 }
1875 
1876 static void
1877 note_reg_set (int r)
1878 {
1879   if (sched_deps_info->note_reg_set)
1880     sched_deps_info->note_reg_set (r);
1881 }
1882 
1883 static void
1884 note_reg_clobber (int r)
1885 {
1886   if (sched_deps_info->note_reg_clobber)
1887     sched_deps_info->note_reg_clobber (r);
1888 }
1889 
1890 static void
1891 note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
1892 {
1893   if (sched_deps_info->note_mem_dep)
1894     sched_deps_info->note_mem_dep (m1, m2, e, ds);
1895 }
1896 
1897 static void
1898 note_dep (rtx_insn *e, ds_t ds)
1899 {
1900   if (sched_deps_info->note_dep)
1901     sched_deps_info->note_dep (e, ds);
1902 }
1903 
1904 /* Return the reg_note corresponding to DS.  */
1905 enum reg_note
1906 ds_to_dt (ds_t ds)
1907 {
1908   if (ds & DEP_TRUE)
1909     return REG_DEP_TRUE;
1910   else if (ds & DEP_OUTPUT)
1911     return REG_DEP_OUTPUT;
1912   else if (ds & DEP_ANTI)
1913     return REG_DEP_ANTI;
1914   else
1915     {
1916       gcc_assert (ds & DEP_CONTROL);
1917       return REG_DEP_CONTROL;
1918     }
1919 }
1920 
1921 
1922 
1923 /* Functions for computation of info needed for register pressure
1924    sensitive insn scheduling.  */
1925 
1926 
1927 /* Allocate and return reg_use_data structure for REGNO and INSN.  */
1928 static struct reg_use_data *
1929 create_insn_reg_use (int regno, rtx_insn *insn)
1930 {
1931   struct reg_use_data *use;
1932 
1933   use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1934   use->regno = regno;
1935   use->insn = insn;
1936   use->next_insn_use = INSN_REG_USE_LIST (insn);
1937   INSN_REG_USE_LIST (insn) = use;
1938   return use;
1939 }
1940 
1941 /* Allocate reg_set_data structure for REGNO and INSN.  */
1942 static void
1943 create_insn_reg_set (int regno, rtx insn)
1944 {
1945   struct reg_set_data *set;
1946 
1947   set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1948   set->regno = regno;
1949   set->insn = insn;
1950   set->next_insn_set = INSN_REG_SET_LIST (insn);
1951   INSN_REG_SET_LIST (insn) = set;
1952 }
1953 
1954 /* Set up insn register uses for INSN and dependency context DEPS.  */
1955 static void
1956 setup_insn_reg_uses (struct deps_desc *deps, rtx_insn *insn)
1957 {
1958   unsigned i;
1959   reg_set_iterator rsi;
1960   struct reg_use_data *use, *use2, *next;
1961   struct deps_reg *reg_last;
1962 
1963   EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1964     {
1965       if (i < FIRST_PSEUDO_REGISTER
1966 	  && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1967 	continue;
1968 
1969       if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1970 	  && ! REGNO_REG_SET_P (reg_pending_sets, i)
1971 	  && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1972 	/* Ignore a use that is not dying.  */
1973 	continue;
1974 
1975       use = create_insn_reg_use (i, insn);
1976       use->next_regno_use = use;
1977       reg_last = &deps->reg_last[i];
1978 
1979       /* Create the cycle list of uses.  */
1980       for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
1981 	{
1982 	  use2 = create_insn_reg_use (i, list->insn ());
1983 	  next = use->next_regno_use;
1984 	  use->next_regno_use = use2;
1985 	  use2->next_regno_use = next;
1986 	}
1987     }
1988 }
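/* The next_regno_use links built above form a circular list: a single
   use points back at itself, and each later use is spliced in directly
   after the head.  For three uses U, U2a and U2b of the same regno
   (added in that order, names illustrative only) the cycle is

       U -> U2b -> U2a -> U

   so iterating from any node until the starting node reappears visits
   every use of the register exactly once.  */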
1989 
1990 /* Register pressure info for the currently processed insn.  */
1991 static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1992 
1993 /* Return TRUE if INSN has the use structure for REGNO.  */
1994 static bool
1995 insn_use_p (rtx insn, int regno)
1996 {
1997   struct reg_use_data *use;
1998 
1999   for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2000     if (use->regno == regno)
2001       return true;
2002   return false;
2003 }
2004 
2005 /* Update the register pressure info after birth of pseudo register REGNO
2006    in INSN.  Arguments CLOBBER_P and UNUSED_P say, respectively, whether
2007    the register is clobbered or unused after the insn.  */
2008 static void
2009 mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
2010 {
2011   int incr, new_incr;
2012   enum reg_class cl;
2013 
2014   gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2015   cl = sched_regno_pressure_class[regno];
2016   if (cl != NO_REGS)
2017     {
2018       incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2019       if (clobber_p)
2020 	{
2021 	  new_incr = reg_pressure_info[cl].clobber_increase + incr;
2022 	  reg_pressure_info[cl].clobber_increase = new_incr;
2023 	}
2024       else if (unused_p)
2025 	{
2026 	  new_incr = reg_pressure_info[cl].unused_set_increase + incr;
2027 	  reg_pressure_info[cl].unused_set_increase = new_incr;
2028 	}
2029       else
2030 	{
2031 	  new_incr = reg_pressure_info[cl].set_increase + incr;
2032 	  reg_pressure_info[cl].set_increase = new_incr;
2033 	  if (! insn_use_p (insn, regno))
2034 	    reg_pressure_info[cl].change += incr;
2035 	  create_insn_reg_set (regno, insn);
2036 	}
2037       gcc_assert (new_incr < (1 << INCREASE_BITS));
2038     }
2039 }
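/* A worked example (target-dependent, for illustration only): if REGNO
   is a DImode pseudo whose pressure class needs two hard registers to
   hold a DImode value, INCR is 2; a plain set that is neither a clobber
   nor unused then raises set_increase by 2 and, when INSN does not also
   use REGNO, raises the net pressure change by 2 as well.  */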
2040 
2041 /* Like mark_insn_pseudo_birth except that NREGS says how many
2042    hard registers are involved in the birth.  */
2043 static void
2044 mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
2045 			    bool clobber_p, bool unused_p)
2046 {
2047   enum reg_class cl;
2048   int new_incr, last = regno + nregs;
2049 
2050   while (regno < last)
2051     {
2052       gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2053       if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2054 	{
2055 	  cl = sched_regno_pressure_class[regno];
2056 	  if (cl != NO_REGS)
2057 	    {
2058 	      if (clobber_p)
2059 		{
2060 		  new_incr = reg_pressure_info[cl].clobber_increase + 1;
2061 		  reg_pressure_info[cl].clobber_increase = new_incr;
2062 		}
2063 	      else if (unused_p)
2064 		{
2065 		  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
2066 		  reg_pressure_info[cl].unused_set_increase = new_incr;
2067 		}
2068 	      else
2069 		{
2070 		  new_incr = reg_pressure_info[cl].set_increase + 1;
2071 		  reg_pressure_info[cl].set_increase = new_incr;
2072 		  if (! insn_use_p (insn, regno))
2073 		    reg_pressure_info[cl].change += 1;
2074 		  create_insn_reg_set (regno, insn);
2075 		}
2076 	      gcc_assert (new_incr < (1 << INCREASE_BITS));
2077 	    }
2078 	}
2079       regno++;
2080     }
2081 }
2082 
2083 /* Update the register pressure info after birth of pseudo or hard
2084    register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say,
2085    respectively, whether the register is clobbered or unused after
2086    the insn.  */
2087 static void
2088 mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2089 {
2090   int regno;
2091 
2092   if (GET_CODE (reg) == SUBREG)
2093     reg = SUBREG_REG (reg);
2094 
2095   if (! REG_P (reg))
2096     return;
2097 
2098   regno = REGNO (reg);
2099   if (regno < FIRST_PSEUDO_REGISTER)
2100     mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg),
2101 				clobber_p, unused_p);
2102   else
2103     mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2104 }
2105 
2106 /* Update the register pressure info after death of pseudo register
2107    REGNO.  */
2108 static void
2109 mark_pseudo_death (int regno)
2110 {
2111   int incr;
2112   enum reg_class cl;
2113 
2114   gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2115   cl = sched_regno_pressure_class[regno];
2116   if (cl != NO_REGS)
2117     {
2118       incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2119       reg_pressure_info[cl].change -= incr;
2120     }
2121 }
2122 
2123 /* Like mark_pseudo_death except that NREGS says how many hard
2124    registers are involved in the death.  */
2125 static void
2126 mark_hard_regno_death (int regno, int nregs)
2127 {
2128   enum reg_class cl;
2129   int last = regno + nregs;
2130 
2131   while (regno < last)
2132     {
2133       gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2134       if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2135 	{
2136 	  cl = sched_regno_pressure_class[regno];
2137 	  if (cl != NO_REGS)
2138 	    reg_pressure_info[cl].change -= 1;
2139 	}
2140       regno++;
2141     }
2142 }
2143 
2144 /* Update the register pressure info after death of pseudo or hard
2145    register REG.  */
2146 static void
2147 mark_reg_death (rtx reg)
2148 {
2149   int regno;
2150 
2151   if (GET_CODE (reg) == SUBREG)
2152     reg = SUBREG_REG (reg);
2153 
2154   if (! REG_P (reg))
2155     return;
2156 
2157   regno = REGNO (reg);
2158   if (regno < FIRST_PSEUDO_REGISTER)
2159     mark_hard_regno_death (regno, REG_NREGS (reg));
2160   else
2161     mark_pseudo_death (regno);
2162 }
2163 
2164 /* Process SETTER of REG.  DATA is an insn containing the setter.  */
2165 static void
2166 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2167 {
2168   if (setter != NULL_RTX && GET_CODE (setter) != SET)
2169     return;
2170   mark_insn_reg_birth
2171     ((rtx) data, reg, false,
2172      find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2173 }
2174 
2175 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
2176 static void
2177 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2178 {
2179   if (GET_CODE (setter) == CLOBBER)
2180     mark_insn_reg_birth ((rtx) data, reg, true, false);
2181 }
2182 
2183 /* Set up reg pressure info related to INSN.  */
2184 void
2185 init_insn_reg_pressure_info (rtx_insn *insn)
2186 {
2187   int i, len;
2188   enum reg_class cl;
2189   static struct reg_pressure_data *pressure_info;
2190   rtx link;
2191 
2192   gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
2193 
2194   if (! INSN_P (insn))
2195     return;
2196 
2197   for (i = 0; i < ira_pressure_classes_num; i++)
2198     {
2199       cl = ira_pressure_classes[i];
2200       reg_pressure_info[cl].clobber_increase = 0;
2201       reg_pressure_info[cl].set_increase = 0;
2202       reg_pressure_info[cl].unused_set_increase = 0;
2203       reg_pressure_info[cl].change = 0;
2204     }
2205 
2206   note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2207 
2208   note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2209 
2210   if (AUTO_INC_DEC)
2211     for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2212       if (REG_NOTE_KIND (link) == REG_INC)
2213 	mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2214 
2215   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2216     if (REG_NOTE_KIND (link) == REG_DEAD)
2217       mark_reg_death (XEXP (link, 0));
2218 
2219   len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
2220   pressure_info
2221     = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2222   if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2223     INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2224 						    * sizeof (int), 1);
2225   for (i = 0; i < ira_pressure_classes_num; i++)
2226     {
2227       cl = ira_pressure_classes[i];
2228       pressure_info[i].clobber_increase
2229 	= reg_pressure_info[cl].clobber_increase;
2230       pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2231       pressure_info[i].unused_set_increase
2232 	= reg_pressure_info[cl].unused_set_increase;
2233       pressure_info[i].change = reg_pressure_info[cl].change;
2234     }
2235 }
2236 
2237 
2238 
2239 
2240 /* Internal variable for the sched_analyze_[12] () functions.
2241    If it is nonzero, it means that sched_analyze_[12] is looking
2242    at the topmost SET.  */
2243 static bool can_start_lhs_rhs_p;
2244 
2245 /* Extend reg info for the deps context DEPS given that
2246    we have just generated a register numbered REGNO.  */
2247 static void
2248 extend_deps_reg_info (struct deps_desc *deps, int regno)
2249 {
2250   int max_regno = regno + 1;
2251 
2252   gcc_assert (!reload_completed);
2253 
2254   /* In a readonly context, it would not hurt to extend info,
2255      but it should not be needed.  */
2256   if (reload_completed && deps->readonly)
2257     {
2258       deps->max_reg = max_regno;
2259       return;
2260     }
2261 
2262   if (max_regno > deps->max_reg)
2263     {
2264       deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2265                                    max_regno);
2266       memset (&deps->reg_last[deps->max_reg],
2267               0, (max_regno - deps->max_reg)
2268               * sizeof (struct deps_reg));
2269       deps->max_reg = max_regno;
2270     }
2271 }
2272 
2273 /* Extends REG_INFO_P if needed.  */
2274 void
2275 maybe_extend_reg_info_p (void)
2276 {
2277   /* Extend REG_INFO_P, if needed.  */
2278   if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2279     {
2280       size_t new_reg_info_p_size = max_regno + 128;
2281 
2282       gcc_assert (!reload_completed && sel_sched_p ());
2283 
2284       reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2285                                                     new_reg_info_p_size,
2286                                                     reg_info_p_size,
2287                                                     sizeof (*reg_info_p));
2288       reg_info_p_size = new_reg_info_p_size;
2289     }
2290 }
2291 
2292 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2293    The type of the reference is specified by REF and can be SET,
2294    CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */
2295 
2296 static void
2297 sched_analyze_reg (struct deps_desc *deps, int regno, machine_mode mode,
2298 		   enum rtx_code ref, rtx_insn *insn)
2299 {
2300   /* We could emit new pseudos in renaming.  Extend the reg structures.  */
2301   if (!reload_completed && sel_sched_p ()
2302       && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2303     extend_deps_reg_info (deps, regno);
2304 
2305   maybe_extend_reg_info_p ();
2306 
2307   /* A hard reg in a wide mode may really be multiple registers.
2308      If so, mark all of them just like the first.  */
2309   if (regno < FIRST_PSEUDO_REGISTER)
2310     {
2311       int i = hard_regno_nregs (regno, mode);
2312       if (ref == SET)
2313 	{
2314 	  while (--i >= 0)
2315 	    note_reg_set (regno + i);
2316 	}
2317       else if (ref == USE)
2318 	{
2319 	  while (--i >= 0)
2320 	    note_reg_use (regno + i);
2321 	}
2322       else
2323 	{
2324 	  while (--i >= 0)
2325 	    note_reg_clobber (regno + i);
2326 	}
2327     }
2328 
2329   /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2330      it does not reload.  Ignore these as they have served their
2331      purpose already.  */
2332   else if (regno >= deps->max_reg)
2333     {
2334       enum rtx_code code = GET_CODE (PATTERN (insn));
2335       gcc_assert (code == USE || code == CLOBBER);
2336     }
2337 
2338   else
2339     {
2340       if (ref == SET)
2341 	note_reg_set (regno);
2342       else if (ref == USE)
2343 	note_reg_use (regno);
2344       else
2345 	note_reg_clobber (regno);
2346 
2347       /* Pseudos that are REG_EQUIV to something may be replaced
2348 	 by that during reloading.  We need only add dependencies for
2349 	the address in the REG_EQUIV note.  */
2350       if (!reload_completed && get_reg_known_equiv_p (regno))
2351 	{
2352 	  rtx t = get_reg_known_value (regno);
2353 	  if (MEM_P (t))
2354 	    sched_analyze_2 (deps, XEXP (t, 0), insn);
2355 	}
2356 
2357       /* Don't let it cross a call after scheduling if it doesn't
2358 	 already cross one.  */
2359       if (REG_N_CALLS_CROSSED (regno) == 0)
2360 	{
2361 	  if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2362 	    deps->sched_before_next_call
2363 	      = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2364 	  else
2365 	    add_dependence_list (insn, deps->last_function_call, 1,
2366 				 REG_DEP_ANTI, false);
2367 	}
2368     }
2369 }
2370 
2371 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2372    rtx, X, creating all dependencies generated by the write to the
2373    destination of X, and reads of everything mentioned.  */
2374 
2375 static void
2376 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2377 {
2378   rtx dest = XEXP (x, 0);
2379   enum rtx_code code = GET_CODE (x);
2380   bool cslr_p = can_start_lhs_rhs_p;
2381 
2382   can_start_lhs_rhs_p = false;
2383 
2384   gcc_assert (dest);
2385   if (dest == 0)
2386     return;
2387 
2388   if (cslr_p && sched_deps_info->start_lhs)
2389     sched_deps_info->start_lhs (dest);
2390 
2391   if (GET_CODE (dest) == PARALLEL)
2392     {
2393       int i;
2394 
2395       for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2396 	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2397 	  sched_analyze_1 (deps,
2398 			   gen_rtx_CLOBBER (VOIDmode,
2399 					    XEXP (XVECEXP (dest, 0, i), 0)),
2400 			   insn);
2401 
2402       if (cslr_p && sched_deps_info->finish_lhs)
2403 	sched_deps_info->finish_lhs ();
2404 
2405       if (code == SET)
2406 	{
2407 	  can_start_lhs_rhs_p = cslr_p;
2408 
2409 	  sched_analyze_2 (deps, SET_SRC (x), insn);
2410 
2411 	  can_start_lhs_rhs_p = false;
2412 	}
2413 
2414       return;
2415     }
2416 
2417   while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2418 	 || GET_CODE (dest) == ZERO_EXTRACT)
2419     {
2420       if (GET_CODE (dest) == STRICT_LOW_PART
2421 	 || GET_CODE (dest) == ZERO_EXTRACT
2422 	 || read_modify_subreg_p (dest))
2423         {
2424 	  /* These both read and modify the result.  We must handle
2425              them as writes to get proper dependencies for following
2426              instructions.  We must handle them as reads to get proper
2427              dependencies from this to previous instructions.
2428              Thus we need to call sched_analyze_2.  */
2429 
2430 	  sched_analyze_2 (deps, XEXP (dest, 0), insn);
2431 	}
2432       if (GET_CODE (dest) == ZERO_EXTRACT)
2433 	{
2434 	  /* The second and third arguments are values read by this insn.  */
2435 	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
2436 	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
2437 	}
2438       dest = XEXP (dest, 0);
2439     }
2440 
2441   if (REG_P (dest))
2442     {
2443       int regno = REGNO (dest);
2444       machine_mode mode = GET_MODE (dest);
2445 
2446       sched_analyze_reg (deps, regno, mode, code, insn);
2447 
2448 #ifdef STACK_REGS
2449       /* Treat all writes to a stack register as modifying the TOS.  */
2450       if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2451 	{
2452 	  /* Avoid analyzing the same register twice.  */
2453 	  if (regno != FIRST_STACK_REG)
2454 	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2455 
2456 	  add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2457 			       FIRST_STACK_REG);
2458 	}
2459 #endif
2460     }
2461   else if (MEM_P (dest))
2462     {
2463       /* Writing memory.  */
2464       rtx t = dest;
2465 
2466       if (sched_deps_info->use_cselib)
2467 	{
2468 	  machine_mode address_mode = get_address_mode (dest);
2469 
2470 	  t = shallow_copy_rtx (dest);
2471 	  cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2472 				   GET_MODE (t), insn);
2473 	  XEXP (t, 0)
2474 	    = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2475 						insn);
2476 	}
2477       t = canon_rtx (t);
2478 
2479       /* Pending lists can't get larger with a readonly context.  */
2480       if (!deps->readonly
2481           && ((deps->pending_read_list_length + deps->pending_write_list_length)
2482               >= MAX_PENDING_LIST_LENGTH))
2483 	{
2484 	  /* Flush all pending reads and writes to prevent the pending lists
2485 	     from getting any larger.  Insn scheduling runs too slowly when
2486 	     these lists get long.  When compiling GCC with itself,
2487 	     this flush occurs 8 times for sparc, and 10 times for m88k using
2488 	     the default value of 32.  */
2489 	  flush_pending_lists (deps, insn, false, true);
2490 	}
2491       else
2492 	{
2493 	  rtx_insn_list *pending;
2494 	  rtx_expr_list *pending_mem;
2495 
2496 	  pending = deps->pending_read_insns;
2497 	  pending_mem = deps->pending_read_mems;
2498 	  while (pending)
2499 	    {
2500 	      if (anti_dependence (pending_mem->element (), t)
2501 		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2502 		note_mem_dep (t, pending_mem->element (), pending->insn (),
2503 			      DEP_ANTI);
2504 
2505 	      pending = pending->next ();
2506 	      pending_mem = pending_mem->next ();
2507 	    }
2508 
2509 	  pending = deps->pending_write_insns;
2510 	  pending_mem = deps->pending_write_mems;
2511 	  while (pending)
2512 	    {
2513 	      if (output_dependence (pending_mem->element (), t)
2514 		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2515 		note_mem_dep (t, pending_mem->element (),
2516 			      pending->insn (),
2517 			      DEP_OUTPUT);
2518 
2519 	      pending = pending->next ();
2520 	      pending_mem = pending_mem->next ();
2521 	    }
2522 
2523 	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2524 			       REG_DEP_ANTI, true);
2525 	  add_dependence_list (insn, deps->pending_jump_insns, 1,
2526 			       REG_DEP_CONTROL, true);
2527 
2528           if (!deps->readonly)
2529             add_insn_mem_dependence (deps, false, insn, dest);
2530 	}
2531       sched_analyze_2 (deps, XEXP (dest, 0), insn);
2532     }
2533 
2534   if (cslr_p && sched_deps_info->finish_lhs)
2535     sched_deps_info->finish_lhs ();
2536 
2537   /* Analyze reads.  */
2538   if (GET_CODE (x) == SET)
2539     {
2540       can_start_lhs_rhs_p = cslr_p;
2541 
2542       sched_analyze_2 (deps, SET_SRC (x), insn);
2543 
2544       can_start_lhs_rhs_p = false;
2545     }
2546 }
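/* A worked illustration on hypothetical RTL: analyzing

       (set (mem:SI (reg:SI 100)) (reg:SI 101))

   takes the MEM_P branch above, noting anti-dependencies against
   pending reads that may alias the store and output dependencies
   against aliasing pending writes, then appends the store to the
   pending write lists; finally the address (reg:SI 100) and, for the
   SET, the source (reg:SI 101) are passed to sched_analyze_2 as
   reads.  */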
2547 
2548 /* Analyze the uses of memory and registers in rtx X in INSN.  */
2549 static void
2550 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2551 {
2552   int i;
2553   int j;
2554   enum rtx_code code;
2555   const char *fmt;
2556   bool cslr_p = can_start_lhs_rhs_p;
2557 
2558   can_start_lhs_rhs_p = false;
2559 
2560   gcc_assert (x);
2561   if (x == 0)
2562     return;
2563 
2564   if (cslr_p && sched_deps_info->start_rhs)
2565     sched_deps_info->start_rhs (x);
2566 
2567   code = GET_CODE (x);
2568 
2569   switch (code)
2570     {
2571     CASE_CONST_ANY:
2572     case SYMBOL_REF:
2573     case CONST:
2574     case LABEL_REF:
2575       /* Ignore constants.  */
2576       if (cslr_p && sched_deps_info->finish_rhs)
2577 	sched_deps_info->finish_rhs ();
2578 
2579       return;
2580 
2581     case CC0:
2582       if (!HAVE_cc0)
2583 	gcc_unreachable ();
2584 
2585       /* User of CC0 depends on immediately preceding insn.  */
2586       SCHED_GROUP_P (insn) = 1;
2587        /* Don't move CC0 setter to another block (it can set up the
2588         same flag for previous CC0 users which is safe).  */
2589       CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2590 
2591       if (cslr_p && sched_deps_info->finish_rhs)
2592 	sched_deps_info->finish_rhs ();
2593 
2594       return;
2595 
2596     case REG:
2597       {
2598 	int regno = REGNO (x);
2599 	machine_mode mode = GET_MODE (x);
2600 
2601 	sched_analyze_reg (deps, regno, mode, USE, insn);
2602 
2603 #ifdef STACK_REGS
2604       /* Treat all reads of a stack register as modifying the TOS.  */
2605       if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2606 	{
2607 	  /* Avoid analyzing the same register twice.  */
2608 	  if (regno != FIRST_STACK_REG)
2609 	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2610 	  sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2611 	}
2612 #endif
2613 
2614 	if (cslr_p && sched_deps_info->finish_rhs)
2615 	  sched_deps_info->finish_rhs ();
2616 
2617 	return;
2618       }
2619 
2620     case MEM:
2621       {
2622 	/* Reading memory.  */
2623 	rtx_insn_list *u;
2624 	rtx_insn_list *pending;
2625 	rtx_expr_list *pending_mem;
2626 	rtx t = x;
2627 
2628 	if (sched_deps_info->use_cselib)
2629 	  {
2630 	    machine_mode address_mode = get_address_mode (t);
2631 
2632 	    t = shallow_copy_rtx (t);
2633 	    cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2634 				     GET_MODE (t), insn);
2635 	    XEXP (t, 0)
2636 	      = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2637 						  insn);
2638 	  }
2639 
2640 	if (!DEBUG_INSN_P (insn))
2641 	  {
2642 	    t = canon_rtx (t);
2643 	    pending = deps->pending_read_insns;
2644 	    pending_mem = deps->pending_read_mems;
2645 	    while (pending)
2646 	      {
2647 		if (read_dependence (pending_mem->element (), t)
2648 		    && ! sched_insns_conditions_mutex_p (insn,
2649 							 pending->insn ()))
2650 		  note_mem_dep (t, pending_mem->element (),
2651 				pending->insn (),
2652 				DEP_ANTI);
2653 
2654 		pending = pending->next ();
2655 		pending_mem = pending_mem->next ();
2656 	      }
2657 
2658 	    pending = deps->pending_write_insns;
2659 	    pending_mem = deps->pending_write_mems;
2660 	    while (pending)
2661 	      {
2662 		if (true_dependence (pending_mem->element (), VOIDmode, t)
2663 		    && ! sched_insns_conditions_mutex_p (insn,
2664 							 pending->insn ()))
2665 		  note_mem_dep (t, pending_mem->element (),
2666 				pending->insn (),
2667 				sched_deps_info->generate_spec_deps
2668 				? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2669 
2670 		pending = pending->next ();
2671 		pending_mem = pending_mem->next ();
2672 	      }
2673 
2674 	    for (u = deps->last_pending_memory_flush; u; u = u->next ())
2675 	      add_dependence (insn, u->insn (), REG_DEP_ANTI);
2676 
2677 	    for (u = deps->pending_jump_insns; u; u = u->next ())
2678 	      if (deps_may_trap_p (x))
2679 		{
2680 		  if ((sched_deps_info->generate_spec_deps)
2681 		      && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2682 		    {
2683 		      ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2684 					      MAX_DEP_WEAK);
2685 
2686 		      note_dep (u->insn (), ds);
2687 		    }
2688 		  else
2689 		    add_dependence (insn, u->insn (), REG_DEP_CONTROL);
2690 		}
2691 	  }
2692 
2693 	/* Always add these dependencies to pending_reads, since
2694 	   this insn may be followed by a write.  */
2695 	if (!deps->readonly)
2696 	  {
2697 	    if ((deps->pending_read_list_length
2698 		 + deps->pending_write_list_length)
2699 		>= MAX_PENDING_LIST_LENGTH
2700 		&& !DEBUG_INSN_P (insn))
2701 	      flush_pending_lists (deps, insn, true, true);
2702 	    add_insn_mem_dependence (deps, true, insn, x);
2703 	  }
2704 
2705 	sched_analyze_2 (deps, XEXP (x, 0), insn);
2706 
2707 	if (cslr_p && sched_deps_info->finish_rhs)
2708 	  sched_deps_info->finish_rhs ();
2709 
2710 	return;
2711       }
2712 
2713     /* Force pending stores to memory in case a trap handler needs them.
2714        Also force pending loads from memory; loads and stores can segfault
2715        and the signal handler won't be triggered if the trap insn was moved
2716        above load or store insn.  */
2717     case TRAP_IF:
2718       flush_pending_lists (deps, insn, true, true);
2719       break;
2720 
2721     case PREFETCH:
2722       if (PREFETCH_SCHEDULE_BARRIER_P (x))
2723 	reg_pending_barrier = TRUE_BARRIER;
2724       /* Prefetch insn contains addresses only.  So if the prefetch
2725 	 address has no registers, there will be no dependencies on
2726 	 the prefetch insn.  This is wrong from the correctness point
2727 	 of view, as such a prefetch can be moved below a jump insn
2728 	 which usually generates MOVE_BARRIER, preventing insns
2729 	 containing registers or memories from moving through the
2730 	 barrier.  It is also wrong from the performance point of
2731 	 view, as a prefetch without dependencies will tend to be
2732 	 issued later instead of earlier.  It is hard to generate
2733 	 accurate dependencies for prefetch insns, as a prefetch has
2734 	 only the start address, but it is better to have something
2735 	 than nothing.  */
2736       if (!deps->readonly)
2737 	{
2738 	  rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2739 	  if (sched_deps_info->use_cselib)
2740 	    cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2741 	  add_insn_mem_dependence (deps, true, insn, x);
2742 	}
2743       break;
2744 
2745     case UNSPEC_VOLATILE:
2746       flush_pending_lists (deps, insn, true, true);
2747       /* FALLTHRU */
2748 
2749     case ASM_OPERANDS:
2750     case ASM_INPUT:
2751       {
2752 	/* Traditional and volatile asm instructions must be considered to use
2753 	   and clobber all hard registers, all pseudo-registers and all of
2754 	   memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.
2755 
2756 	   Consider for instance a volatile asm that changes the fpu rounding
2757 	   mode.  An insn should not be moved across this even if it only uses
2758 	   pseudo-regs because it might give an incorrectly rounded result.  */
2759 	if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2760 	    && !DEBUG_INSN_P (insn))
2761 	  reg_pending_barrier = TRUE_BARRIER;
2762 
2763 	/* For all ASM_OPERANDS, we must traverse the vector of input
2764 	   operands.  We cannot just fall through here since then we would
2765 	   be confused by the ASM_INPUT rtx inside ASM_OPERANDS, which does
2766 	   not indicate a traditional asm unlike its normal usage.  */
2767 
2768 	if (code == ASM_OPERANDS)
2769 	  {
2770 	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2771 	      sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2772 
2773 	    if (cslr_p && sched_deps_info->finish_rhs)
2774 	      sched_deps_info->finish_rhs ();
2775 
2776 	    return;
2777 	  }
2778 	break;
2779       }
2780 
2781     case PRE_DEC:
2782     case POST_DEC:
2783     case PRE_INC:
2784     case POST_INC:
2785       /* These both read and modify the result.  We must handle them as writes
2786          to get proper dependencies for following instructions.  We must handle
2787          them as reads to get proper dependencies from this to previous
2788          instructions.  Thus we need to pass them to both sched_analyze_1
2789          and sched_analyze_2.  We must call sched_analyze_2 first in order
2790          to get the proper antecedent for the read.  */
2791       sched_analyze_2 (deps, XEXP (x, 0), insn);
2792       sched_analyze_1 (deps, x, insn);
2793 
2794       if (cslr_p && sched_deps_info->finish_rhs)
2795 	sched_deps_info->finish_rhs ();
2796 
2797       return;
2798 
2799     case POST_MODIFY:
2800     case PRE_MODIFY:
2801       /* op0 = op0 + op1 */
2802       sched_analyze_2 (deps, XEXP (x, 0), insn);
2803       sched_analyze_2 (deps, XEXP (x, 1), insn);
2804       sched_analyze_1 (deps, x, insn);
2805 
2806       if (cslr_p && sched_deps_info->finish_rhs)
2807 	sched_deps_info->finish_rhs ();
2808 
2809       return;
2810 
2811     default:
2812       break;
2813     }
2814 
2815   /* Other cases: walk the insn.  */
2816   fmt = GET_RTX_FORMAT (code);
2817   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2818     {
2819       if (fmt[i] == 'e')
2820 	sched_analyze_2 (deps, XEXP (x, i), insn);
2821       else if (fmt[i] == 'E')
2822 	for (j = 0; j < XVECLEN (x, i); j++)
2823 	  sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2824     }
2825 
2826   if (cslr_p && sched_deps_info->finish_rhs)
2827     sched_deps_info->finish_rhs ();
2828 }
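/* For instance, on the hypothetical auto-increment address

       (mem:SI (post_inc:SI (reg:SI 100)))

   the POST_INC case above first lets sched_analyze_2 record the use of
   register 100 and then lets sched_analyze_1 record its modification
   (as a clobber), so the side effect is visible both as a read and as
   a write.  */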
2829 
2830 /* Try to group two fusible insns together to prevent the scheduler
2831    from scheduling them apart.  */
2832 
2833 static void
2834 sched_macro_fuse_insns (rtx_insn *insn)
2835 {
2836   rtx_insn *prev;
2837   /* No target hook would return true with a debug insn as either of
2838      the hook operands, and with very long sequences consisting only of
2839      debug insns, calling sched_macro_fuse_insns on each of them would
2840      have quadratic compile-time complexity.  */
2841   if (DEBUG_INSN_P (insn))
2842     return;
2843   prev = prev_nonnote_nondebug_insn (insn);
2844   if (!prev)
2845     return;
2846 
2847   if (any_condjump_p (insn))
2848     {
2849       unsigned int condreg1, condreg2;
2850       rtx cc_reg_1;
2851       targetm.fixed_condition_code_regs (&condreg1, &condreg2);
2852       cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
2853       if (reg_referenced_p (cc_reg_1, PATTERN (insn))
2854 	  && modified_in_p (cc_reg_1, prev))
2855 	{
2856 	  if (targetm.sched.macro_fusion_pair_p (prev, insn))
2857 	    SCHED_GROUP_P (insn) = 1;
2858 	  return;
2859 	}
2860     }
2861 
2862   if (single_set (insn) && single_set (prev))
2863     {
2864       if (targetm.sched.macro_fusion_pair_p (prev, insn))
2865 	SCHED_GROUP_P (insn) = 1;
2866     }
2867 }
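/* A typical use (target-dependent, illustrative only) is fusing a
   compare with the conditional jump that consumes its flags:

       (1) cc = compare (r1, r2)
       (2) if (cc) jump L

   When the target's macro_fusion_pair_p hook accepts the pair,
   SCHED_GROUP_P is set on (2), and the scheduler keeps (1) and (2)
   adjacent, mirroring hardware macro-fusion.  */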
2868 
2869 /* Get the implicit reg pending clobbers for INSN and save them in TEMP.  */
2870 void
2871 get_implicit_reg_pending_clobbers (HARD_REG_SET *temp, rtx_insn *insn)
2872 {
2873   extract_insn (insn);
2874   preprocess_constraints (insn);
2875   alternative_mask preferred = get_preferred_alternatives (insn);
2876   ira_implicitly_set_insn_hard_regs (temp, preferred);
2877   AND_COMPL_HARD_REG_SET (*temp, ira_no_alloc_regs);
2878 }
2879 
2880 /* Analyze an INSN with pattern X to find all dependencies.  */
2881 static void
2882 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
2883 {
2884   RTX_CODE code = GET_CODE (x);
2885   rtx link;
2886   unsigned i;
2887   reg_set_iterator rsi;
2888 
2889   if (! reload_completed)
2890     {
2891       HARD_REG_SET temp;
2892       get_implicit_reg_pending_clobbers (&temp, insn);
2893       IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2894     }
2895 
2896   can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2897 			 && code == SET);
2898 
2899   /* Group compare and branch insns for macro-fusion.  */
2900   if (!deps->readonly
2901       && targetm.sched.macro_fusion_p
2902       && targetm.sched.macro_fusion_p ())
2903     sched_macro_fuse_insns (insn);
2904 
2905   if (may_trap_p (x))
2906     /* Avoid moving trapping instructions across function calls that might
2907        not always return.  */
2908     add_dependence_list (insn, deps->last_function_call_may_noreturn,
2909 			 1, REG_DEP_ANTI, true);
2910 
2911   /* We must avoid creating a situation in which two successors of the
2912      current block have different unwind info after scheduling.  If at any
2913      point the two paths re-join this leads to incorrect unwind info.  */
2914   /* ??? There are certain situations involving a forced frame pointer in
2915      which, with extra effort, we could fix up the unwind info at a later
2916      CFG join.  However, it seems better to notice these cases earlier
2917      during prologue generation and avoid marking the frame pointer setup
2918      as frame-related at all.  */
2919   if (RTX_FRAME_RELATED_P (insn))
2920     {
2921       /* Make sure prologue insn is scheduled before next jump.  */
2922       deps->sched_before_next_jump
2923 	= alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2924 
2925       /* Make sure epilogue insn is scheduled after preceding jumps.  */
2926       add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2927 			   REG_DEP_ANTI, true);
2928       add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2929 			   true);
2930     }
2931 
2932   if (code == COND_EXEC)
2933     {
2934       sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2935 
2936       /* ??? Should be recording conditions so we reduce the number of
2937 	 false dependencies.  */
2938       x = COND_EXEC_CODE (x);
2939       code = GET_CODE (x);
2940     }
2941   if (code == SET || code == CLOBBER)
2942     {
2943       sched_analyze_1 (deps, x, insn);
2944 
2945       /* Bare clobber insns are used for letting life analysis, reg-stack
2946 	 and others know that a value is dead.  Depend on the last call
2947 	 instruction so that reg-stack won't get confused.  */
2948       if (code == CLOBBER)
2949 	add_dependence_list (insn, deps->last_function_call, 1,
2950 			     REG_DEP_OUTPUT, true);
2951     }
2952   else if (code == PARALLEL)
2953     {
2954       for (i = XVECLEN (x, 0); i--;)
2955 	{
2956 	  rtx sub = XVECEXP (x, 0, i);
2957 	  code = GET_CODE (sub);
2958 
2959 	  if (code == COND_EXEC)
2960 	    {
2961 	      sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2962 	      sub = COND_EXEC_CODE (sub);
2963 	      code = GET_CODE (sub);
2964 	    }
2965 	  if (code == SET || code == CLOBBER)
2966 	    sched_analyze_1 (deps, sub, insn);
2967 	  else
2968 	    sched_analyze_2 (deps, sub, insn);
2969 	}
2970     }
2971   else
2972     sched_analyze_2 (deps, x, insn);
2973 
2974   /* Mark registers CLOBBERED or used by called function.  */
2975   if (CALL_P (insn))
2976     {
2977       for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2978 	{
2979 	  if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2980 	    sched_analyze_1 (deps, XEXP (link, 0), insn);
2981 	  else if (GET_CODE (XEXP (link, 0)) != SET)
2982 	    sched_analyze_2 (deps, XEXP (link, 0), insn);
2983 	}
2984       /* Don't schedule anything after a tail call; a tail call needs
2985 	 to use at least all call-saved registers.  */
2986       if (SIBLING_CALL_P (insn))
2987 	reg_pending_barrier = TRUE_BARRIER;
2988       else if (find_reg_note (insn, REG_SETJMP, NULL))
2989 	reg_pending_barrier = MOVE_BARRIER;
2990     }
2991 
2992   if (JUMP_P (insn))
2993     {
2994       rtx_insn *next = next_nonnote_nondebug_insn (insn);
2995       if (next && BARRIER_P (next))
2996 	reg_pending_barrier = MOVE_BARRIER;
2997       else
2998 	{
2999 	  rtx_insn_list *pending;
3000 	  rtx_expr_list *pending_mem;
3001 
3002           if (sched_deps_info->compute_jump_reg_dependencies)
3003             {
3004               (*sched_deps_info->compute_jump_reg_dependencies)
3005 		(insn, reg_pending_control_uses);
3006 
3007               /* Make latency of jump equal to 0 by using anti-dependence.  */
3008               EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3009                 {
3010                   struct deps_reg *reg_last = &deps->reg_last[i];
3011                   add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
3012 				       false);
3013                   add_dependence_list (insn, reg_last->implicit_sets,
3014 				       0, REG_DEP_ANTI, false);
3015                   add_dependence_list (insn, reg_last->clobbers, 0,
3016 				       REG_DEP_ANTI, false);
3017                 }
3018             }
3019 
3020 	  /* All memory writes and volatile reads must happen before the
3021 	     jump.  Non-volatile reads must happen before the jump iff
3022 	     the result is needed by the register use mask computed above.  */
3023 
3024 	  pending = deps->pending_write_insns;
3025 	  pending_mem = deps->pending_write_mems;
3026 	  while (pending)
3027 	    {
3028 	      if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3029 		add_dependence (insn, pending->insn (),
3030 				REG_DEP_OUTPUT);
3031 	      pending = pending->next ();
3032 	      pending_mem = pending_mem->next ();
3033 	    }
3034 
3035 	  pending = deps->pending_read_insns;
3036 	  pending_mem = deps->pending_read_mems;
3037 	  while (pending)
3038 	    {
3039 	      if (MEM_VOLATILE_P (pending_mem->element ())
3040 		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3041 		add_dependence (insn, pending->insn (),
3042 				REG_DEP_OUTPUT);
3043 	      pending = pending->next ();
3044 	      pending_mem = pending_mem->next ();
3045 	    }
3046 
3047 	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
3048 			       REG_DEP_ANTI, true);
3049 	  add_dependence_list (insn, deps->pending_jump_insns, 1,
3050 			       REG_DEP_ANTI, true);
3051 	}
3052     }
3053 
3054   /* If this instruction can throw an exception, then moving it changes
3055      where block boundaries fall.  This is mighty confusing elsewhere.
3056      Therefore, prevent such an instruction from being moved.  Same for
3057      non-jump instructions that define block boundaries.
3058      ??? Unclear whether this is still necessary in EBB mode.  If not,
3059      add_branch_dependences should be adjusted for RGN mode instead.  */
3060   if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
3061       || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
3062     reg_pending_barrier = MOVE_BARRIER;
3063 
3064   if (sched_pressure != SCHED_PRESSURE_NONE)
3065     {
3066       setup_insn_reg_uses (deps, insn);
3067       init_insn_reg_pressure_info (insn);
3068     }
3069 
3070   /* Add register dependencies for insn.  */
3071   if (DEBUG_INSN_P (insn))
3072     {
3073       rtx_insn *prev = deps->last_debug_insn;
3074       rtx_insn_list *u;
3075 
3076       if (!deps->readonly)
3077 	deps->last_debug_insn = insn;
3078 
3079       if (prev)
3080 	add_dependence (insn, prev, REG_DEP_ANTI);
3081 
3082       add_dependence_list (insn, deps->last_function_call, 1,
3083 			   REG_DEP_ANTI, false);
3084 
3085       if (!sel_sched_p ())
3086 	for (u = deps->last_pending_memory_flush; u; u = u->next ())
3087 	  add_dependence (insn, u->insn (), REG_DEP_ANTI);
3088 
3089       EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3090 	{
3091 	  struct deps_reg *reg_last = &deps->reg_last[i];
3092 	  add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
3093 	  /* There's no point in making REG_DEP_CONTROL dependencies for
3094 	     debug insns.  */
3095 	  add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3096 			       false);
3097 
3098 	  if (!deps->readonly)
3099 	    reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3100 	}
3101       CLEAR_REG_SET (reg_pending_uses);
3102 
3103       /* Quite often, a debug insn will refer to stuff in the
3104 	 previous instruction, but the reason we want this
3105 	 dependency here is to make sure the scheduler doesn't
3106 	 gratuitously move a debug insn ahead.  This could dirty
3107 	 DF flags and cause additional analysis that wouldn't have
3108 	 occurred in compilation without debug insns, and such
3109 	 additional analysis can modify the generated code.  */
3110       prev = PREV_INSN (insn);
3111 
3112       if (prev && NONDEBUG_INSN_P (prev))
3113 	add_dependence (insn, prev, REG_DEP_ANTI);
3114     }
3115   else
3116     {
3117       regset_head set_or_clobbered;
3118 
3119       EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3120 	{
3121 	  struct deps_reg *reg_last = &deps->reg_last[i];
3122 	  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3123 	  add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3124 			       false);
3125 	  add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3126 			       false);
3127 
3128 	  if (!deps->readonly)
3129 	    {
3130 	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3131 	      reg_last->uses_length++;
3132 	    }
3133 	}
3134 
3135       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3136 	if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3137 	  {
3138 	    struct deps_reg *reg_last = &deps->reg_last[i];
3139 	    add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3140 	    add_dependence_list (insn, reg_last->implicit_sets, 0,
3141 				 REG_DEP_ANTI, false);
3142 	    add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3143 				 false);
3144 
3145 	    if (!deps->readonly)
3146 	      {
3147 		reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3148 		reg_last->uses_length++;
3149 	      }
3150 	  }
3151 
3152       if (targetm.sched.exposed_pipeline)
3153 	{
3154 	  INIT_REG_SET (&set_or_clobbered);
3155 	  bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3156 		      reg_pending_sets);
3157 	  EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3158 	    {
3159 	      struct deps_reg *reg_last = &deps->reg_last[i];
3160 	      rtx list;
3161 	      for (list = reg_last->uses; list; list = XEXP (list, 1))
3162 		{
3163 		  rtx other = XEXP (list, 0);
3164 		  if (INSN_CACHED_COND (other) != const_true_rtx
3165 		      && refers_to_regno_p (i, INSN_CACHED_COND (other)))
3166 		    INSN_CACHED_COND (other) = const_true_rtx;
3167 		}
3168 	    }
3169 	}
3170 
3171       /* If the current insn is conditional, we can't free any
3172 	 of the lists.  */
3173       if (sched_has_condition_p (insn))
3174 	{
3175 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3176 	    {
3177 	      struct deps_reg *reg_last = &deps->reg_last[i];
3178 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3179 				   false);
3180 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
3181 				   REG_DEP_ANTI, false);
3182 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3183 				   false);
3184 	      add_dependence_list (insn, reg_last->control_uses, 0,
3185 				   REG_DEP_CONTROL, false);
3186 
3187 	      if (!deps->readonly)
3188 		{
3189 		  reg_last->clobbers
3190 		    = alloc_INSN_LIST (insn, reg_last->clobbers);
3191 		  reg_last->clobbers_length++;
3192 		}
3193 	    }
3194 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3195 	    {
3196 	      struct deps_reg *reg_last = &deps->reg_last[i];
3197 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3198 				   false);
3199 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
3200 				   REG_DEP_ANTI, false);
3201 	      add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3202 				   false);
3203 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3204 				   false);
3205 	      add_dependence_list (insn, reg_last->control_uses, 0,
3206 				   REG_DEP_CONTROL, false);
3207 
3208 	      if (!deps->readonly)
3209 		reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3210 	    }
3211 	}
3212       else
3213 	{
3214 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3215 	    {
3216 	      struct deps_reg *reg_last = &deps->reg_last[i];
3217 	      if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
3218 		  || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
3219 		{
3220 		  add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3221 						REG_DEP_OUTPUT, false);
3222 		  add_dependence_list_and_free (deps, insn,
3223 						&reg_last->implicit_sets, 0,
3224 						REG_DEP_ANTI, false);
		  add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
						REG_DEP_ANTI, false);
		  add_dependence_list_and_free (deps, insn,
						&reg_last->control_uses, 0,
						REG_DEP_ANTI, false);
		  add_dependence_list_and_free (deps, insn,
						&reg_last->clobbers, 0,
						REG_DEP_OUTPUT, false);

		  if (!deps->readonly)
		    {
		      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
		      reg_last->clobbers_length = 0;
		      reg_last->uses_length = 0;
		    }
		}
	      else
		{
		  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
				       false);
		  add_dependence_list (insn, reg_last->implicit_sets, 0,
				       REG_DEP_ANTI, false);
		  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
				       false);
		  add_dependence_list (insn, reg_last->control_uses, 0,
				       REG_DEP_CONTROL, false);
		}

	      if (!deps->readonly)
		{
		  reg_last->clobbers_length++;
		  reg_last->clobbers
		    = alloc_INSN_LIST (insn, reg_last->clobbers);
		}
	    }
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];

	      add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
					    REG_DEP_OUTPUT, false);
	      add_dependence_list_and_free (deps, insn,
					    &reg_last->implicit_sets,
					    0, REG_DEP_ANTI, false);
	      add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
					    REG_DEP_OUTPUT, false);
	      add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
					    REG_DEP_ANTI, false);
	      add_dependence_list (insn, reg_last->control_uses, 0,
				   REG_DEP_CONTROL, false);

	      if (!deps->readonly)
		{
		  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
		  reg_last->uses_length = 0;
		  reg_last->clobbers_length = 0;
		}
	    }
	}
      if (!deps->readonly)
	{
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      reg_last->control_uses
		= alloc_INSN_LIST (insn, reg_last->control_uses);
	    }
	}
    }

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
      {
	struct deps_reg *reg_last = &deps->reg_last[i];
	add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
	add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
	add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
	add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
			     false);

	if (!deps->readonly)
	  reg_last->implicit_sets
	    = alloc_INSN_LIST (insn, reg_last->implicit_sets);
      }

  if (!deps->readonly)
    {
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
	    || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
	  SET_REGNO_REG_SET (&deps->reg_last_in_use, i);

      /* Set up the pending barrier found.  */
      deps->last_reg_pending_barrier = reg_pending_barrier;
    }

  CLEAR_REG_SET (reg_pending_uses);
  CLEAR_REG_SET (reg_pending_clobbers);
  CLEAR_REG_SET (reg_pending_sets);
  CLEAR_REG_SET (reg_pending_control_uses);
  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);

  /* Add dependencies if a scheduling barrier was found.  */
  if (reg_pending_barrier)
    {
      /* In the case of a barrier, most of the added dependencies are not
         real, so we use anti-dependence here.  */
      if (sched_has_condition_p (insn))
	{
	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
				   true);
	      add_dependence_list (insn, reg_last->sets, 0,
				   reg_pending_barrier == TRUE_BARRIER
				   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
	      add_dependence_list (insn, reg_last->implicit_sets, 0,
				   REG_DEP_ANTI, true);
	      add_dependence_list (insn, reg_last->clobbers, 0,
				   reg_pending_barrier == TRUE_BARRIER
				   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
	    }
	}
      else
	{
	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
					    REG_DEP_ANTI, true);
	      add_dependence_list_and_free (deps, insn,
					    &reg_last->control_uses, 0,
					    REG_DEP_CONTROL, true);
	      add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
					    reg_pending_barrier == TRUE_BARRIER
					    ? REG_DEP_TRUE : REG_DEP_ANTI,
					    true);
	      add_dependence_list_and_free (deps, insn,
					    &reg_last->implicit_sets, 0,
					    REG_DEP_ANTI, true);
	      add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
					    reg_pending_barrier == TRUE_BARRIER
					    ? REG_DEP_TRUE : REG_DEP_ANTI,
					    true);

              if (!deps->readonly)
                {
                  reg_last->uses_length = 0;
                  reg_last->clobbers_length = 0;
                }
	    }
	}

      if (!deps->readonly)
        for (i = 0; i < (unsigned)deps->max_reg; i++)
          {
            struct deps_reg *reg_last = &deps->reg_last[i];
            reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
            SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
          }

      /* Don't flush pending lists on speculative checks for
	 selective scheduling.  */
      if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
	flush_pending_lists (deps, insn, true, true);

      reg_pending_barrier = NOT_A_BARRIER;
    }

  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice-versa.

     We must avoid moving these insns for correctness on targets
     with small register classes, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */
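
  /* For instance (hypothetical RTL), a copy of a hard return-value
     register into a pseudo, such as

	 (set (reg:SI 130) (reg:SI 0))

     involves a hard register and so keeps the post-call group open,
     whereas a move between two pseudos ends the group below.  */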

  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
	{
	  if (DEBUG_INSN_P (insn))
	    /* We don't want to mark debug insns as part of the same
	       sched group.  We know they really aren't, but if we use
	       debug insns to tell that a call group is over, we'll
	       get different code if debug insns are not there and
	       instructions that follow seem like they should be part
	       of the call group.

	       Also, if we did, chain_to_prev_insn would move the
	       deps of the debug insn to the call insn, modifying
	       non-debug post-dependency counts of the debug insn
	       dependencies and otherwise messing with the scheduling
	       order.

	       Instead, let such debug insns be scheduled freely, but
	       keep the call group open in case there are insns that
	       should be part of it afterwards.  Since we grant debug
	       insns higher priority than even sched group insns, it
	       will all turn out all right.  */
	    goto debug_dont_end_call_group;
	  else
	    goto end_call_group;
	}

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (REG_P (tmp))
	dest_regno = REGNO (tmp);
      else
	goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if ((GET_CODE (tmp) == PLUS
	   || GET_CODE (tmp) == MINUS)
	  && REG_P (XEXP (tmp, 0))
	  && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
	  && dest_regno == STACK_POINTER_REGNUM)
	src_regno = STACK_POINTER_REGNUM;
      else if (REG_P (tmp))
	src_regno = REGNO (tmp);
      else
	goto end_call_group;
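
      /* A stack adjustment such as (hypothetical RTL)

	     (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -16)))

	 was treated above as an sp-to-sp move, so it too keeps the
	 post-call group open.  */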

      if (src_regno < FIRST_PSEUDO_REGISTER
	  || dest_regno < FIRST_PSEUDO_REGISTER)
	{
	  if (!deps->readonly
              && deps->in_post_call_group_p == post_call_initial)
	    deps->in_post_call_group_p = post_call;

          if (!sel_sched_p () || sched_emulate_haifa_p)
            {
              SCHED_GROUP_P (insn) = 1;
              CANT_MOVE (insn) = 1;
            }
	}
      else
	{
	end_call_group:
          if (!deps->readonly)
            deps->in_post_call_group_p = not_post_call;
	}
    }

 debug_dont_end_call_group:
  if ((current_sched_info->flags & DO_SPECULATION)
      && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
    /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
       be speculated.  */
    {
      if (sel_sched_p ())
        sel_mark_hard_insn (insn);
      else
        {
          sd_iterator_def sd_it;
          dep_t dep;

          for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
               sd_iterator_cond (&sd_it, &dep);)
            change_spec_dep_to_hard (sd_it);
        }
    }

  /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
     preserve the original ordering of insns with REG_ARGS_SIZE notes.  */
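  /* For example, two stack adjustments for outgoing call arguments each
     carry a REG_ARGS_SIZE note; the output dependence added below keeps
     them in their original order, so the recorded argument-stack size
     stays consistent at every point in the schedule.  */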
  if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
    {
      if (deps->last_args_size)
	add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
      if (!deps->readonly)
	deps->last_args_size = insn;
    }

  /* We must not mix prologue and epilogue insns.  See PR78029.  */
  if (prologue_contains (insn))
    {
      add_dependence_list (insn, deps->last_epilogue, true, REG_DEP_ANTI, true);
      if (!deps->readonly)
	{
	  if (deps->last_logue_was_epilogue)
	    free_INSN_LIST_list (&deps->last_prologue);
	  deps->last_prologue = alloc_INSN_LIST (insn, deps->last_prologue);
	  deps->last_logue_was_epilogue = false;
	}
    }

  if (epilogue_contains (insn))
    {
      add_dependence_list (insn, deps->last_prologue, true, REG_DEP_ANTI, true);
      if (!deps->readonly)
	{
	  if (!deps->last_logue_was_epilogue)
	    free_INSN_LIST_list (&deps->last_epilogue);
	  deps->last_epilogue = alloc_INSN_LIST (insn, deps->last_epilogue);
	  deps->last_logue_was_epilogue = true;
	}
    }
}

/* Return TRUE if INSN might not always return normally (e.g. call exit,
   longjmp, loop forever, ...).  */
/* FIXME: Why can't this function just use flags_from_decl_or_type and
   test for ECF_NORETURN?  */
static bool
call_may_noreturn_p (rtx_insn *insn)
{
  rtx call;

  /* const or pure calls that aren't looping will always return.  */
  if (RTL_CONST_OR_PURE_CALL_P (insn)
      && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
    return false;

  call = get_call_rtx_from (insn);
  if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
    {
      rtx symbol = XEXP (XEXP (call, 0), 0);
      if (SYMBOL_REF_DECL (symbol)
	  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
	{
	  if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
	      == BUILT_IN_NORMAL)
	    switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
	      {
	      case BUILT_IN_BCMP:
	      case BUILT_IN_BCOPY:
	      case BUILT_IN_BZERO:
	      case BUILT_IN_INDEX:
	      case BUILT_IN_MEMCHR:
	      case BUILT_IN_MEMCMP:
	      case BUILT_IN_MEMCPY:
	      case BUILT_IN_MEMMOVE:
	      case BUILT_IN_MEMPCPY:
	      case BUILT_IN_MEMSET:
	      case BUILT_IN_RINDEX:
	      case BUILT_IN_STPCPY:
	      case BUILT_IN_STPNCPY:
	      case BUILT_IN_STRCAT:
	      case BUILT_IN_STRCHR:
	      case BUILT_IN_STRCMP:
	      case BUILT_IN_STRCPY:
	      case BUILT_IN_STRCSPN:
	      case BUILT_IN_STRLEN:
	      case BUILT_IN_STRNCAT:
	      case BUILT_IN_STRNCMP:
	      case BUILT_IN_STRNCPY:
	      case BUILT_IN_STRPBRK:
	      case BUILT_IN_STRRCHR:
	      case BUILT_IN_STRSPN:
	      case BUILT_IN_STRSTR:
		/* Assume certain string/memory builtins always return.  */
		return false;
	      default:
		break;
	      }
	}
    }

  /* For all other calls assume that they might not always return.  */
  return true;
}

/* Return true if INSN should be made dependent on the previous instruction
   group, and if all INSN's dependencies should be moved to the first
   instruction of that group.  */

static bool
chain_to_prev_insn_p (rtx_insn *insn)
{
  /* INSN forms a group with the previous instruction.  */
  if (SCHED_GROUP_P (insn))
    return true;

  /* If the previous instruction clobbers a register R and this one sets
     part of R, the clobber was added specifically to help us track the
     liveness of R.  There's no point scheduling the clobber and leaving
     INSN behind, especially if we move the clobber to another block.  */
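  /* E.g. (hypothetical RTL) the pair

	 (clobber (reg:DI 100))
	 (set (subreg:SI (reg:DI 100) 0) ...)

     should stay together: the clobber exists only to mark the whole of
     (reg:DI 100) as defined alongside the partial set.  */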
  rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
  if (prev
      && INSN_P (prev)
      && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
      && GET_CODE (PATTERN (prev)) == CLOBBER)
    {
      rtx x = XEXP (PATTERN (prev), 0);
      if (set_of (x, insn))
	return true;
    }

  return false;
}

/* Analyze INSN with DEPS as a context.  */
void
deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
{
  if (sched_deps_info->start_insn)
    sched_deps_info->start_insn (insn);

  /* Record the condition for this insn.  */
  if (NONDEBUG_INSN_P (insn))
    {
      rtx t;
      sched_get_condition_with_rev (insn, NULL);
      t = INSN_CACHED_COND (insn);
      INSN_COND_DEPS (insn) = NULL;
      if (reload_completed
	  && (current_sched_info->flags & DO_PREDICATION)
	  && COMPARISON_P (t)
	  && REG_P (XEXP (t, 0))
	  && CONSTANT_P (XEXP (t, 1)))
	{
	  unsigned int regno;
	  int nregs;
	  rtx_insn_list *cond_deps = NULL;
	  t = XEXP (t, 0);
	  regno = REGNO (t);
	  nregs = REG_NREGS (t);
	  while (nregs-- > 0)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
	      cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
	      cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
	      cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
	    }
	  INSN_COND_DEPS (insn) = cond_deps;
	}
    }
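
  /* To illustrate: for an insn predicated on a condition like
     (ne (reg:CC ...) (const_int 0)) (hypothetical RTL), the loop above
     records in INSN_COND_DEPS the latest sets, clobbers and implicit
     sets of the condition register - that is, the producers the
     predicate depends on.  */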

  if (JUMP_P (insn))
    {
      /* Make each JUMP_INSN (but not a speculative check)
         a scheduling barrier for memory references.  */
      if (!deps->readonly
          && !(sel_sched_p ()
               && sel_insn_is_speculation_check (insn)))
        {
          /* Keep the list a reasonable size.  */
          if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
            flush_pending_lists (deps, insn, true, true);
          else
	    deps->pending_jump_insns
              = alloc_INSN_LIST (insn, deps->pending_jump_insns);
        }

      /* For each insn which shouldn't cross a jump, add a dependence.  */
      add_dependence_list_and_free (deps, insn,
				    &deps->sched_before_next_jump, 1,
				    REG_DEP_ANTI, true);

      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
    {
      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (CALL_P (insn))
    {
      int i;

      CANT_MOVE (insn) = 1;

      if (find_reg_note (insn, REG_SETJMP, NULL))
        {
          /* This is setjmp.  Assume that all registers, not just
             hard registers, may be clobbered by this call.  */
          reg_pending_barrier = MOVE_BARRIER;
        }
      else
        {
          for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
            /* A call may read and modify global register variables.  */
            if (global_regs[i])
              {
                SET_REGNO_REG_SET (reg_pending_sets, i);
                SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
              }
          /* Other call-clobbered hard regs may be clobbered.
             Since we only have a choice between 'might be clobbered'
             and 'definitely not clobbered', we must include all
             partly call-clobbered registers here.  */
	    else if (targetm.hard_regno_call_part_clobbered (i,
							     reg_raw_mode[i])
                     || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
              SET_REGNO_REG_SET (reg_pending_clobbers, i);
          /* We don't know what set of fixed registers might be used
             by the function, but it is certain that the stack pointer
             is among them, so be conservative.  */
            else if (fixed_regs[i])
	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
          /* The frame pointer is normally not used by the function
             itself, but by the debugger.  */
          /* ??? MIPS o32 is an exception.  It uses the frame pointer
             in the macro expansion of jal but does not represent this
             fact in the call_insn rtl.  */
            else if (i == FRAME_POINTER_REGNUM
                     || (i == HARD_FRAME_POINTER_REGNUM
                         && (! reload_completed || frame_pointer_needed)))
	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
        }

      /* For each insn which shouldn't cross a call, add a dependence
         between that insn and this call insn.  */
      add_dependence_list_and_free (deps, insn,
                                    &deps->sched_before_next_call, 1,
                                    REG_DEP_ANTI, true);

      sched_analyze_insn (deps, PATTERN (insn), insn);

      /* If CALL would be in a sched group, then this will violate the
	 convention that sched group insns have dependencies only on the
	 previous instruction.

	 Of course one can say: "Hey!  What about the head of the sched group?"
	 And I will answer: "Basic principles (one dep per insn) are always
	 the same."  */
      gcc_assert (!SCHED_GROUP_P (insn));

      /* In the absence of interprocedural alias analysis, we must flush
         all pending reads and writes, and start new dependencies starting
         from here.  But only flush writes for constant calls (which may
         be passed a pointer to something we haven't written yet).  */
      flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));

      if (!deps->readonly)
        {
          /* Remember the last function call for limiting lifetimes.  */
          free_INSN_LIST_list (&deps->last_function_call);
          deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

	  if (call_may_noreturn_p (insn))
	    {
	      /* Remember the last function call that might not always return
		 normally for limiting moves of trapping insns.  */
	      free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
	      deps->last_function_call_may_noreturn
		= alloc_INSN_LIST (insn, NULL_RTX);
	    }

          /* Before reload, begin a post-call group, so as to keep the
             lifetimes of hard registers correct.  */
          if (! reload_completed)
            deps->in_post_call_group_p = post_call;
        }
    }

  if (sched_deps_info->use_cselib)
    cselib_process_insn (insn);

  if (sched_deps_info->finish_insn)
    sched_deps_info->finish_insn ();

  /* Fixup the dependencies in the sched group.  */
  if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
      && chain_to_prev_insn_p (insn)
      && !sel_sched_p ())
    chain_to_prev_insn (insn);
}

/* Initialize DEPS for the new block beginning with HEAD.  */
void
deps_start_bb (struct deps_desc *deps, rtx_insn *head)
{
  gcc_assert (!deps->readonly);

  /* Before reload, if the previous block ended in a call, show that
     we are inside a post-call group, so as to keep the lifetimes of
     hard registers correct.  */
  if (! reload_completed && !LABEL_P (head))
    {
      rtx_insn *insn = prev_nonnote_nondebug_insn (head);

      if (insn && CALL_P (insn))
	deps->in_post_call_group_p = post_call_initial;
    }
}

/* Analyze every insn between HEAD and TAIL inclusive, creating backward
   dependencies for each insn.  */
void
sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn;

  if (sched_deps_info->use_cselib)
    cselib_init (CSELIB_RECORD_MEMORY);

  deps_start_bb (deps, head);

  for (insn = head;; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  /* And initialize deps_lists.  */
	  sd_init_insn (insn);
	  /* Clean up SCHED_GROUP_P which may be set by last
	     scheduler pass.  */
	  if (SCHED_GROUP_P (insn))
	    SCHED_GROUP_P (insn) = 0;
	}

      deps_analyze_insn (deps, insn);

      if (insn == tail)
	{
	  if (sched_deps_info->use_cselib)
	    cselib_finish ();
	  return;
	}
    }
  gcc_unreachable ();
}
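
/* A typical (simplified) use of the interface above, roughly mirroring
   what the region scheduler does per block:

       struct deps_desc deps;
       init_deps (&deps, false);
       sched_analyze (&deps, head, tail);
       ...
       free_deps (&deps);

   and, once scheduling is complete, sched_free_deps (head, tail, true)
   releases the per-insn dependency lists.  */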

/* Helper for sched_free_deps ().
   Delete INSN's (RESOLVED_P) backward dependencies.  */
static void
delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
{
  sd_iterator_def sd_it;
  dep_t dep;
  sd_list_types_def types;

  if (resolved_p)
    types = SD_LIST_RES_BACK;
  else
    types = SD_LIST_BACK;

  for (sd_it = sd_iterator_start (insn, types);
       sd_iterator_cond (&sd_it, &dep);)
    {
      dep_link_t link = *sd_it.linkp;
      dep_node_t node = DEP_LINK_NODE (link);
      deps_list_t back_list;
      deps_list_t forw_list;

      get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
      remove_from_deps_list (link, back_list);
      delete_dep_node (node);
    }
}

/* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
   deps_lists.  */
void
sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
{
  rtx_insn *insn;
  rtx_insn *next_tail = NEXT_INSN (tail);

  /* We make two passes since some insns may be scheduled before their
     dependencies are resolved.  */
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && INSN_LUID (insn) > 0)
      {
	/* Clear forward deps and leave the dep_nodes to the
	   corresponding back_deps list.  */
	if (resolved_p)
	  clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
	else
	  clear_deps_list (INSN_FORW_DEPS (insn));
      }
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && INSN_LUID (insn) > 0)
      {
	/* Clear resolved back deps together with their dep_nodes.  */
	delete_dep_nodes_in_back_deps (insn, resolved_p);

	sd_finish_insn (insn);
      }
}

/* Initialize variables for region data dependence analysis.
   When LAZY_REG_LAST is true, do not allocate reg_last array
   of struct deps_desc immediately.  */

void
init_deps (struct deps_desc *deps, bool lazy_reg_last)
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  if (lazy_reg_last)
    deps->reg_last = NULL;
  else
    deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
  INIT_REG_SET (&deps->reg_last_in_use);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_jump_insns = 0;
  deps->pending_read_list_length = 0;
  deps->pending_write_list_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->last_function_call_may_noreturn = 0;
  deps->sched_before_next_call = 0;
  deps->sched_before_next_jump = 0;
  deps->in_post_call_group_p = not_post_call;
  deps->last_debug_insn = 0;
  deps->last_args_size = 0;
  deps->last_prologue = 0;
  deps->last_epilogue = 0;
  deps->last_logue_was_epilogue = false;
  deps->last_reg_pending_barrier = NOT_A_BARRIER;
  deps->readonly = 0;
}
/* Initialize only the reg_last field of DEPS, which was not allocated
   before because we initialized DEPS lazily.  */
void
init_deps_reg_last (struct deps_desc *deps)
{
  gcc_assert (deps && deps->max_reg > 0);
  gcc_assert (deps->reg_last == NULL);

  deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
}


/* Free insn lists found in DEPS.  */

void
free_deps (struct deps_desc *deps)
{
  unsigned i;
  reg_set_iterator rsi;

  /* We set max_reg to 0 when this context was already freed.  */
  if (deps->max_reg == 0)
    {
      gcc_assert (deps->reg_last == NULL);
      return;
    }
  deps->max_reg = 0;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a testcase with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
	free_INSN_LIST_list (&reg_last->uses);
      if (reg_last->sets)
	free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->implicit_sets)
	free_INSN_LIST_list (&reg_last->implicit_sets);
      if (reg_last->control_uses)
	free_INSN_LIST_list (&reg_last->control_uses);
      if (reg_last->clobbers)
	free_INSN_LIST_list (&reg_last->clobbers);
    }
  CLEAR_REG_SET (&deps->reg_last_in_use);

  /* As we initialize reg_last lazily, it is possible that we didn't allocate
     it at all.  */
  free (deps->reg_last);
  deps->reg_last = NULL;

  deps = NULL;
}

/* Remove INSN from dependence contexts DEPS.  */
void
remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
{
  int removed;
  unsigned i;
  reg_set_iterator rsi;

  removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
                                               &deps->pending_read_mems);
  if (!DEBUG_INSN_P (insn))
    deps->pending_read_list_length -= removed;
  removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
                                               &deps->pending_write_mems);
  deps->pending_write_list_length -= removed;

  removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
  deps->pending_flush_length -= removed;
  removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
  deps->pending_flush_length -= removed;

  unsigned to_clear = -1U;
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      if (to_clear != -1U)
	{
	  CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);
	  to_clear = -1U;
	}
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
	remove_from_dependence_list (insn, &reg_last->uses);
      if (reg_last->sets)
	remove_from_dependence_list (insn, &reg_last->sets);
      if (reg_last->implicit_sets)
	remove_from_dependence_list (insn, &reg_last->implicit_sets);
      if (reg_last->clobbers)
	remove_from_dependence_list (insn, &reg_last->clobbers);
      if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
	  && !reg_last->clobbers)
	to_clear = i;
    }
  if (to_clear != -1U)
    CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);

  if (CALL_P (insn))
    {
      remove_from_dependence_list (insn, &deps->last_function_call);
      remove_from_dependence_list (insn,
				   &deps->last_function_call_may_noreturn);
    }
  remove_from_dependence_list (insn, &deps->sched_before_next_call);
}

/* Init deps data vector.  */
static void
init_deps_data_vector (void)
{
  int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
  if (reserve > 0 && ! h_d_i_d.space (reserve))
    h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
}
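
/* For instance, with sched_max_luid == 100 and an empty vector, the
   function above grows h_d_i_d to 150 zero-initialized entries - a
   half-again margin so the vector is not regrown for every few new
   insns.  */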

/* If it is profitable to use them, initialize or extend (depending on
   GLOBAL_P) dependency data.  */
void
sched_deps_init (bool global_p)
{
  /* Average number of insns in the basic block.
     '+ 1' is used to make it nonzero.  */
  int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;

  init_deps_data_vector ();

  /* We use another caching mechanism for selective scheduling, so
     we don't use this one.  */
  if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
    {
      /* ?!? We could save some memory by computing a per-region luid mapping
         which could reduce both the number of vectors in the cache and the
         size of each vector.  Instead we just avoid the cache entirely unless
         the average number of instructions in a basic block is very high.  See
         the comment before the declaration of true_dependency_cache for
         what we consider "very high".  */
      cache_size = 0;
      extend_dependency_caches (sched_max_luid, true);
    }

  if (global_p)
    {
      dl_pool = new object_allocator<_deps_list> ("deps_list");
				/* Allocate lists for one block at a time.  */
      dn_pool = new object_allocator<_dep_node> ("dep_node");
				/* Allocate nodes for one block at a time.  */
    }
}


/* Create or extend (depending on CREATE_P) dependency caches to
   size N.  */
void
extend_dependency_caches (int n, bool create_p)
{
  if (create_p || true_dependency_cache)
    {
      int i, luid = cache_size + n;

      true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
					  luid);
      output_dependency_cache = XRESIZEVEC (bitmap_head,
					    output_dependency_cache, luid);
      anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
					  luid);
      control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
					  luid);

      if (current_sched_info->flags & DO_SPECULATION)
        spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
					    luid);

      for (i = cache_size; i < luid; i++)
	{
	  bitmap_initialize (&true_dependency_cache[i], 0);
	  bitmap_initialize (&output_dependency_cache[i], 0);
	  bitmap_initialize (&anti_dependency_cache[i], 0);
	  bitmap_initialize (&control_dependency_cache[i], 0);

          if (current_sched_info->flags & DO_SPECULATION)
            bitmap_initialize (&spec_dependency_cache[i], 0);
	}
      cache_size = luid;
    }
}

/* Finalize dependency information for the whole function.  */
void
sched_deps_finish (void)
{
  gcc_assert (deps_pools_are_empty_p ());
  delete dn_pool;
  delete dl_pool;
  dn_pool = NULL;
  dl_pool = NULL;

  h_d_i_d.release ();

  if (true_dependency_cache)
    {
      int i;

      for (i = 0; i < cache_size; i++)
	{
	  bitmap_clear (&true_dependency_cache[i]);
	  bitmap_clear (&output_dependency_cache[i]);
	  bitmap_clear (&anti_dependency_cache[i]);
	  bitmap_clear (&control_dependency_cache[i]);

          if (sched_deps_info->generate_spec_deps)
            bitmap_clear (&spec_dependency_cache[i]);
	}
      free (true_dependency_cache);
      true_dependency_cache = NULL;
      free (output_dependency_cache);
      output_dependency_cache = NULL;
      free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      free (control_dependency_cache);
      control_dependency_cache = NULL;

      if (sched_deps_info->generate_spec_deps)
        {
          free (spec_dependency_cache);
          spec_dependency_cache = NULL;
        }

    }

  /* Reset cache_size only now: the clearing loop above iterates up to
     it, so resetting it earlier would make that loop a no-op.  */
  cache_size = 0;
}

/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global (void)
{
  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
  reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
  reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
  reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_barrier = NOT_A_BARRIER;

  if (!sel_sched_p () || sched_emulate_haifa_p)
    {
      sched_deps_info->start_insn = haifa_start_insn;
      sched_deps_info->finish_insn = haifa_finish_insn;

      sched_deps_info->note_reg_set = haifa_note_reg_set;
      sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
      sched_deps_info->note_reg_use = haifa_note_reg_use;

      sched_deps_info->note_mem_dep = haifa_note_mem_dep;
      sched_deps_info->note_dep = haifa_note_dep;
    }
}

/* Free everything used by the dependency analysis code.  */

void
finish_deps_global (void)
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
  FREE_REG_SET (reg_pending_control_uses);
}

/* Estimate the weakness of dependence between MEM1 and MEM2.  */
dw_t
estimate_dep_weak (rtx mem1, rtx mem2)
{
  if (mem1 == mem2)
    /* MEMs are the same - don't speculate.  */
    return MIN_DEP_WEAK;

  rtx r1 = XEXP (mem1, 0);
  rtx r2 = XEXP (mem2, 0);

  if (sched_deps_info->use_cselib)
    {
      /* We cannot call rtx_equal_for_cselib_p because the VALUEs might be
	 dangling at this point, since we never preserve them.  Instead we
	 canonicalize manually to get stable VALUEs out of hashing.  */
      if (GET_CODE (r1) == VALUE && CSELIB_VAL_PTR (r1))
	r1 = canonical_cselib_val (CSELIB_VAL_PTR (r1))->val_rtx;
      if (GET_CODE (r2) == VALUE && CSELIB_VAL_PTR (r2))
	r2 = canonical_cselib_val (CSELIB_VAL_PTR (r2))->val_rtx;
    }
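
  /* The three outcomes below, in increasing order of optimism: same
     address - MIN_DEP_WEAK (never speculate); nothing known -
     UNCERTAIN_DEP_WEAK; clearly different addressing modes - halfway
     between UNCERTAIN_DEP_WEAK and NO_DEP_WEAK.  */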

  if (r1 == r2
      || (REG_P (r1) && REG_P (r2) && REGNO (r1) == REGNO (r2)))
    /* Again, MEMs are the same.  */
    return MIN_DEP_WEAK;
  else if ((REG_P (r1) && !REG_P (r2)) || (!REG_P (r1) && REG_P (r2)))
    /* Different addressing modes - reason to be more speculative
       than usual.  */
    return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
  else
    /* We can't say anything about the dependence.  */
    return UNCERTAIN_DEP_WEAK;
}

/* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
   This function can handle same INSN and ELEM (INSN == ELEM).
   It is a convenience wrapper.  */
static void
add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
{
  ds_t ds;
  bool internal;

  if (dep_type == REG_DEP_TRUE)
    ds = DEP_TRUE;
  else if (dep_type == REG_DEP_OUTPUT)
    ds = DEP_OUTPUT;
  else if (dep_type == REG_DEP_CONTROL)
    ds = DEP_CONTROL;
  else
    {
      gcc_assert (dep_type == REG_DEP_ANTI);
      ds = DEP_ANTI;
    }

  /* When add_dependence is called from inside sched-deps.c, we expect
     cur_insn to be non-null.  */
  internal = cur_insn != NULL;
  if (internal)
    gcc_assert (insn == cur_insn);
  else
    cur_insn = insn;

  note_dep (elem, ds);
  if (!internal)
    cur_insn = NULL;
}

/* Return weakness of speculative type TYPE in the dep_status DS,
   without checking to prevent ICEs on malformed input.  */
static dw_t
get_dep_weak_1 (ds_t ds, ds_t type)
{
  ds = ds & type;

  switch (type)
    {
    case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }

  return (dw_t) ds;
}

/* Return weakness of speculative type TYPE in the dep_status DS.  */
dw_t
get_dep_weak (ds_t ds, ds_t type)
{
  dw_t dw = get_dep_weak_1 (ds, type);

  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
  return dw;
}

/* Return the dep_status, which has the same parameters as DS, except for
   speculative type TYPE, that will have weakness DW.  */
ds_t
set_dep_weak (ds_t ds, ds_t type, dw_t dw)
{
  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);

  ds &= ~type;
  switch (type)
    {
    case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }
  return ds;
}

/* Return the join of two dep_statuses DS1 and DS2.
   If MAX_P is true then choose the greater probability,
   otherwise multiply probabilities.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
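
/* For example, if DS1 and DS2 both carry BEGIN_DATA with weaknesses DW1
   and DW2, the merged weakness below is MAX (DW1, DW2) when MAX_P, and
   DW1 * DW2 / MAX_DEP_WEAK (clamped to MIN_DEP_WEAK) otherwise - so two
   half-certain speculations of MAX_DEP_WEAK / 2 each merge to roughly
   MAX_DEP_WEAK / 4.  */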
static ds_t
ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
{
  ds_t ds, t;

  gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));

  ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);

  t = FIRST_SPEC_TYPE;
  do
    {
      if ((ds1 & t) && !(ds2 & t))
	ds |= ds1 & t;
      else if (!(ds1 & t) && (ds2 & t))
	ds |= ds2 & t;
      else if ((ds1 & t) && (ds2 & t))
	{
	  dw_t dw1 = get_dep_weak (ds1, t);
	  dw_t dw2 = get_dep_weak (ds2, t);
	  ds_t dw;

	  if (!max_p)
	    {
	      dw = ((ds_t) dw1) * ((ds_t) dw2);
	      dw /= MAX_DEP_WEAK;
	      if (dw < MIN_DEP_WEAK)
		dw = MIN_DEP_WEAK;
	    }
	  else
	    {
	      if (dw1 >= dw2)
		dw = dw1;
	      else
		dw = dw2;
	    }

	  ds = set_dep_weak (ds, t, (dw_t) dw);
	}

      if (t == LAST_SPEC_TYPE)
	break;
      t <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  return ds;
}

/* Return the join of two dep_statuses DS1 and DS2.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
ds_t
ds_merge (ds_t ds1, ds_t ds2)
{
  return ds_merge_1 (ds1, ds2, false);
}

/* Return the join of two dep_statuses DS1 and DS2.  */
ds_t
ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
{
  ds_t new_status = ds | ds2;

  if (new_status & SPECULATIVE)
    {
      if ((ds && !(ds & SPECULATIVE))
	  || (ds2 && !(ds2 & SPECULATIVE)))
	/* Then this dep can't be speculative.  */
	new_status &= ~SPECULATIVE;
      else
	{
	  /* Both are speculative.  Merging probabilities.  */
	  if (mem1)
	    {
	      dw_t dw;

	      dw = estimate_dep_weak (mem1, mem2);
	      ds = set_dep_weak (ds, BEGIN_DATA, dw);
	    }

	  if (!ds)
	    new_status = ds2;
	  else if (!ds2)
	    new_status = ds;
	  else
	    new_status = ds_merge (ds2, ds);
	}
    }

  return new_status;
}

/* Return the join of DS1 and DS2.  Use maximum instead of multiplying
   probabilities.  */
ds_t
ds_max_merge (ds_t ds1, ds_t ds2)
{
  if (ds1 == 0 && ds2 == 0)
    return 0;

  if (ds1 == 0 && ds2 != 0)
    return ds2;

  if (ds1 != 0 && ds2 == 0)
    return ds1;

  return ds_merge_1 (ds1, ds2, true);
}

/* Return the probability of speculation success for the speculation
   status DS.  */
dw_t
ds_weak (ds_t ds)
{
  ds_t res = 1, dt;
  int n = 0;

  dt = FIRST_SPEC_TYPE;
  do
    {
      if (ds & dt)
	{
	  res *= (ds_t) get_dep_weak (ds, dt);
	  n++;
	}

      if (dt == LAST_SPEC_TYPE)
	break;
      dt <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  gcc_assert (n);
  while (--n)
    res /= MAX_DEP_WEAK;

  if (res < MIN_DEP_WEAK)
    res = MIN_DEP_WEAK;

  gcc_assert (res <= MAX_DEP_WEAK);

  return (dw_t) res;
}
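
/* E.g. with a single speculation type present ds_weak is just that
   type's weakness, while with two types of weaknesses W1 and W2 it is
   W1 * W2 / MAX_DEP_WEAK - the probabilities are treated as independent
   and multiplied.  */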

/* Return a dep status that contains all speculation types of DS.  */
ds_t
ds_get_speculation_types (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds |= BEGIN_DATA;
  if (ds & BE_IN_DATA)
    ds |= BE_IN_DATA;
  if (ds & BEGIN_CONTROL)
    ds |= BEGIN_CONTROL;
  if (ds & BE_IN_CONTROL)
    ds |= BE_IN_CONTROL;

  return ds & SPECULATIVE;
}

/* Return a dep status that contains maximal weakness for each speculation
   type present in DS.  */
ds_t
ds_get_max_dep_weak (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
  if (ds & BE_IN_DATA)
    ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
  if (ds & BEGIN_CONTROL)
    ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
  if (ds & BE_IN_CONTROL)
    ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);

  return ds;
}

/* Dump information about the dependence status S.  */
static void
dump_ds (FILE *f, ds_t s)
{
  fprintf (f, "{");

  if (s & BEGIN_DATA)
    fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
  if (s & BE_IN_DATA)
    fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
  if (s & BEGIN_CONTROL)
    fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
  if (s & BE_IN_CONTROL)
    fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));

  if (s & HARD_DEP)
    fprintf (f, "HARD_DEP; ");

  if (s & DEP_TRUE)
    fprintf (f, "DEP_TRUE; ");
  if (s & DEP_OUTPUT)
    fprintf (f, "DEP_OUTPUT; ");
  if (s & DEP_ANTI)
    fprintf (f, "DEP_ANTI; ");
  if (s & DEP_CONTROL)
    fprintf (f, "DEP_CONTROL; ");

  fprintf (f, "}");
}
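
/* For example, a data-speculative true dependence of weakness 100 is
   dumped as "{BEGIN_DATA: 100; DEP_TRUE; }", and a plain hard
   dependence as "{HARD_DEP; }".  */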

DEBUG_FUNCTION void
debug_ds (ds_t s)
{
  dump_ds (stderr, s);
  fprintf (stderr, "\n");
}

/* Verify that dependence type and status are consistent.
   If RELAXED_P is true, then skip dep_weakness checks.  */
static void
check_dep (dep_t dep, bool relaxed_p)
{
  enum reg_note dt = DEP_TYPE (dep);
  ds_t ds = DEP_STATUS (dep);

  gcc_assert (DEP_PRO (dep) != DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      gcc_assert (ds == 0);
      return;
    }

  /* Check that dependence type contains the same bits as the status.  */
  if (dt == REG_DEP_TRUE)
    gcc_assert (ds & DEP_TRUE);
  else if (dt == REG_DEP_OUTPUT)
    gcc_assert ((ds & DEP_OUTPUT)
		&& !(ds & DEP_TRUE));
  else if (dt == REG_DEP_ANTI)
    gcc_assert ((ds & DEP_ANTI)
		&& !(ds & (DEP_OUTPUT | DEP_TRUE)));
  else
    gcc_assert (dt == REG_DEP_CONTROL
		&& (ds & DEP_CONTROL)
		&& !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
  /* HARD_DEP cannot appear in the dep_status of a link.  */
  gcc_assert (!(ds & HARD_DEP));

  /* Check that dependence status is set correctly when speculation is not
     supported.  */
  if (!sched_deps_info->generate_spec_deps)
    gcc_assert (!(ds & SPECULATIVE));
  else if (ds & SPECULATIVE)
    {
      if (!relaxed_p)
	{
	  ds_t type = FIRST_SPEC_TYPE;

	  /* Check that dependence weakness is in proper range.  */
	  do
	    {
	      if (ds & type)
		get_dep_weak (ds, type);

	      if (type == LAST_SPEC_TYPE)
		break;
	      type <<= SPEC_TYPE_SHIFT;
	    }
	  while (1);
	}

      if (ds & BEGIN_SPEC)
	{
	  /* Only true dependence can be data speculative.  */
	  if (ds & BEGIN_DATA)
	    gcc_assert (ds & DEP_TRUE);

	  /* Control dependencies in the insn scheduler are represented by
	     anti-dependencies, therefore only anti dependence can be
	     control speculative.  */
	  if (ds & BEGIN_CONTROL)
	    gcc_assert (ds & DEP_ANTI);
	}
      else
	{
	  /* Subsequent speculations should resolve true dependencies.  */
	  gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
	}

      /* Check that true and anti dependencies can't have other speculative
	 statuses.  */
      if (ds & DEP_TRUE)
	gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
      /* An output dependence can't be speculative at all.  */
      gcc_assert (!(ds & DEP_OUTPUT));
      if (ds & DEP_ANTI)
	gcc_assert (ds & BEGIN_CONTROL);
    }
}

/* The following code discovers opportunities to switch a memory reference
   and an increment by modifying the address.  We ensure that this is done
   only for dependencies that are only used to show a single register
   dependence (using DEP_NONREG and DEP_MULTIPLE), and so that every memory
   instruction involved is subject to only one dep that can cause a pattern
   change.

   When we discover a suitable dependency, we fill in the dep_replacement
   structure to show how to modify the memory reference.  */

/* Holds information about a pair of memory reference and register increment
   insns which depend on each other, but could possibly be interchanged.  */
struct mem_inc_info
{
  rtx_insn *inc_insn;
  rtx_insn *mem_insn;

  rtx *mem_loc;
  /* A register occurring in the memory address for which we wish to break
     the dependence.  This must be identical to the destination register of
     the increment.  */
  rtx mem_reg0;
  /* Any kind of index that is added to that register.  */
  rtx mem_index;
  /* The constant offset used in the memory address.  */
  HOST_WIDE_INT mem_constant;
  /* The constant added in the increment insn.  Negated if the increment is
     after the memory address.  */
  HOST_WIDE_INT inc_constant;
  /* The source register used in the increment.  May be different from mem_reg0
     if the increment occurs before the memory address.  */
  rtx inc_input;
};
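
/* To illustrate (hypothetical RTL): given

       mem_insn:  (set (reg:SI 101) (mem:SI (plus:SI (reg:SI 100)
						     (const_int 8))))
       inc_insn:  (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 4)))

   in that order, mem_reg0 is (reg:SI 100), mem_constant is 8 and
   inc_constant is -4 (negated because the increment follows the use).
   If the two insns are interchanged, the address is rewritten as
   (plus:SI (reg:SI 100) (const_int 4)), which names the same location
   once the register holds the incremented value.  */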

/* Verify that the memory location described in MII can be replaced with
   one using NEW_ADDR.  Return the new memory reference or NULL_RTX.  The
   insn remains unchanged by this function.  */

static rtx
attempt_change (struct mem_inc_info *mii, rtx new_addr)
{
  rtx mem = *mii->mem_loc;
  rtx new_mem;

  /* Jump through a lot of hoops to keep the attributes up to date.  We
     do not want to call one of the change address variants that take
     an offset even though we know the offset in many cases.  These
     assume you are changing where the address is pointing by the
     offset.  */
  new_mem = replace_equiv_address_nv (mem, new_addr);
  if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
    {
      if (sched_verbose >= 5)
	fprintf (sched_dump, "validation failure\n");
      return NULL_RTX;
    }

  /* Put back the old one.  */
  validate_change (mii->mem_insn, mii->mem_loc, mem, 0);

  return new_mem;
}

/* Return true if INSN is of a form "a = b op c" where a and b are
   regs.  op is + if c is a reg and +|- if c is a const.  Fill in
   information in MII about what is found.
   BEFORE_MEM indicates whether the increment is found before or after
   a corresponding memory reference.  */

static bool
parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
{
  rtx pat = single_set (insn);
  rtx src, cst;
  bool regs_equal;

  if (RTX_FRAME_RELATED_P (insn) || !pat)
    return false;

  /* Do not allow breaking data dependencies for insns that are marked
     with REG_STACK_CHECK.  */
  if (find_reg_note (insn, REG_STACK_CHECK, NULL))
    return false;

  /* Result must be single reg.  */
  if (!REG_P (SET_DEST (pat)))
    return false;

  if (GET_CODE (SET_SRC (pat)) != PLUS)
    return false;

  mii->inc_insn = insn;
  src = SET_SRC (pat);
  mii->inc_input = XEXP (src, 0);

  if (!REG_P (XEXP (src, 0)))
    return false;

  if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
    return false;

  cst = XEXP (src, 1);
  if (!CONST_INT_P (cst))
    return false;
  mii->inc_constant = INTVAL (cst);

  regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);

  if (!before_mem)
    {
      mii->inc_constant = -mii->inc_constant;
      if (!regs_equal)
	return false;
    }

  if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
    {
      /* Note that the sign has already been reversed for !before_mem.  */
      if (STACK_GROWS_DOWNWARD)
	return mii->inc_constant > 0;
      else
	return mii->inc_constant < 0;
    }
  return true;
}

/* Once a suitable mem reference has been found and the corresponding data
   in MII has been filled in, this function is called to find a suitable
   add or inc insn involving the register we found in the memory
   reference.  */

static bool
find_inc (struct mem_inc_info *mii, bool backwards)
{
  sd_iterator_def sd_it;
  dep_t dep;

  sd_it = sd_iterator_start (mii->mem_insn,
			     backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
  while (sd_iterator_cond (&sd_it, &dep))
    {
      dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
      rtx_insn *pro = DEP_PRO (dep);
      rtx_insn *con = DEP_CON (dep);
      rtx_insn *inc_cand = backwards ? pro : con;
      if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
	goto next;
      if (parse_add_or_inc (mii, inc_cand, backwards))
	{
	  struct dep_replacement *desc;
	  df_ref def;
	  rtx newaddr, newmem;

	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
		     INSN_UID (mii->mem_insn), INSN_UID (inc_cand));
	  /* Need to ensure that none of the operands of the inc
	     instruction are assigned to by the mem insn.  */
	  FOR_EACH_INSN_DEF (def, mii->mem_insn)
	    if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
		|| reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
	      {
		if (sched_verbose >= 5)
		  fprintf (sched_dump,
			   "inc conflicts with store failure.\n");
		goto next;
	      }

	  newaddr = mii->inc_input;
	  if (mii->mem_index != NULL_RTX)
	    newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
				    mii->mem_index);
	  newaddr = plus_constant (GET_MODE (newaddr), newaddr,
				   mii->mem_constant + mii->inc_constant);
	  newmem = attempt_change (mii, newaddr);
	  if (newmem == NULL_RTX)
	    goto next;
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "successful address replacement\n");
	  desc = XCNEW (struct dep_replacement);
	  DEP_REPLACE (dep) = desc;
	  desc->loc = mii->mem_loc;
	  desc->newval = newmem;
	  desc->orig = *desc->loc;
	  desc->insn = mii->mem_insn;
	  move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
			 INSN_SPEC_BACK_DEPS (con));
	  if (backwards)
	    {
	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
		add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
				  REG_DEP_TRUE);
	    }
	  else
	    {
	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
		add_dependence_1 (DEP_CON (dep), mii->mem_insn,
				  REG_DEP_ANTI);
	    }
	  return true;
	}
    next:
      sd_iterator_next (&sd_it);
    }
  return false;
}

/* A recursive function that walks ADDRESS_OF_X to find memory references
   which could be modified during scheduling.  We call find_inc for each
   one we find that has a recognizable form.  MII holds information about
   the pair of memory/increment instructions.
   We ensure that every instruction with a memory reference (which will be
   the location of the replacement) is assigned at most one breakable
   dependency.  */

static bool
find_mem (struct mem_inc_info *mii, rtx *address_of_x)
{
  rtx x = *address_of_x;
  enum rtx_code code = GET_CODE (x);
  const char *const fmt = GET_RTX_FORMAT (code);
  int i;

  if (code == MEM)
    {
      rtx reg0 = XEXP (x, 0);

      mii->mem_loc = address_of_x;
      mii->mem_index = NULL_RTX;
      mii->mem_constant = 0;
      if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
	{
	  mii->mem_constant = INTVAL (XEXP (reg0, 1));
	  reg0 = XEXP (reg0, 0);
	}
      if (GET_CODE (reg0) == PLUS)
	{
	  mii->mem_index = XEXP (reg0, 1);
	  reg0 = XEXP (reg0, 0);
	}
      if (REG_P (reg0))
	{
	  df_ref use;
	  int occurrences = 0;

	  /* Make sure this reg appears only once in this insn.  Can't use
	     count_occurrences since that only works for pseudos.  */
	  FOR_EACH_INSN_USE (use, mii->mem_insn)
	    if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
	      if (++occurrences > 1)
		{
		  if (sched_verbose >= 5)
		    fprintf (sched_dump, "mem count failure\n");
		  return false;
		}

	  mii->mem_reg0 = reg0;
	  return find_inc (mii, true) || find_inc (mii, false);
	}
      return false;
    }

  if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
    {
      /* If REG occurs inside a MEM used in a bit-field reference,
	 that is unacceptable.  */
      return false;
    }

  /* Time for some deep diving.  */
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (find_mem (mii, &XEXP (x, i)))
	    return true;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (find_mem (mii, &XVECEXP (x, i, j)))
	      return true;
	}
    }
  return false;
}


/* Examine the instructions between HEAD and TAIL and try to find
   dependencies that can be broken by modifying one of the patterns.  */

void
find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn, *next_tail = NEXT_INSN (tail);
  int success_in_block = 0;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      struct mem_inc_info mii;

      if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
	continue;

      mii.mem_insn = insn;
      if (find_mem (&mii, &PATTERN (insn)))
	success_in_block++;
    }
  if (success_in_block && sched_verbose >= 5)
    fprintf (sched_dump, "%d candidates for address modification found.\n",
	     success_in_block);
}

#endif /* INSN_SCHEDULING */