1 /* Instruction scheduling pass.  This file computes dependencies between
2    instructions.
3    Copyright (C) 1992-2013 Free Software Foundation, Inc.
4    Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
5    and currently maintained by, Jim Wilson (wilson@cygnus.com).
6 
7 This file is part of GCC.
8 
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13 
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
17 for more details.
18 
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3.  If not see
21 <http://www.gnu.org/licenses/>.  */
22 
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "diagnostic-core.h"
28 #include "rtl.h"
29 #include "tree.h"		/* FIXME: Used by call_may_noreturn_p.  */
30 #include "tm_p.h"
31 #include "hard-reg-set.h"
32 #include "regs.h"
33 #include "function.h"
34 #include "flags.h"
35 #include "insn-config.h"
36 #include "insn-attr.h"
37 #include "except.h"
38 #include "recog.h"
39 #include "emit-rtl.h"
40 #include "sched-int.h"
41 #include "params.h"
42 #include "cselib.h"
43 #include "ira.h"
44 #include "target.h"
45 
46 #ifdef INSN_SCHEDULING
47 
48 #ifdef ENABLE_CHECKING
49 #define CHECK (true)
50 #else
51 #define CHECK (false)
52 #endif
53 
54 /* Holds current parameters for the dependency analyzer.  */
55 struct sched_deps_info_def *sched_deps_info;
56 
57 /* The data is specific to the Haifa scheduler.  */
58 vec<haifa_deps_insn_data_def>
59     h_d_i_d = vNULL;
60 
61 /* Return the major type present in the DS.  */
62 enum reg_note
63 ds_to_dk (ds_t ds)
64 {
65   if (ds & DEP_TRUE)
66     return REG_DEP_TRUE;
67 
68   if (ds & DEP_OUTPUT)
69     return REG_DEP_OUTPUT;
70 
71   if (ds & DEP_CONTROL)
72     return REG_DEP_CONTROL;
73 
74   gcc_assert (ds & DEP_ANTI);
75 
76   return REG_DEP_ANTI;
77 }
78 
79 /* Return equivalent dep_status.  */
80 ds_t
81 dk_to_ds (enum reg_note dk)
82 {
83   switch (dk)
84     {
85     case REG_DEP_TRUE:
86       return DEP_TRUE;
87 
88     case REG_DEP_OUTPUT:
89       return DEP_OUTPUT;
90 
91     case REG_DEP_CONTROL:
92       return DEP_CONTROL;
93 
94     default:
95       gcc_assert (dk == REG_DEP_ANTI);
96       return DEP_ANTI;
97     }
98 }
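/* A note on the mapping above (illustrative only): ds_to_dk picks the
   strongest kind present in a status word, so

     ds_to_dk (DEP_TRUE | DEP_OUTPUT)

   yields REG_DEP_TRUE, while dk_to_ds (REG_DEP_TRUE) gives back just
   DEP_TRUE.  */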
99 
100 /* Functions to operate with dependence information container - dep_t.  */
101 
102 /* Init DEP with the arguments.  */
103 void
104 init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
105 {
106   DEP_PRO (dep) = pro;
107   DEP_CON (dep) = con;
108   DEP_TYPE (dep) = type;
109   DEP_STATUS (dep) = ds;
110   DEP_COST (dep) = UNKNOWN_DEP_COST;
111   DEP_NONREG (dep) = 0;
112   DEP_MULTIPLE (dep) = 0;
113   DEP_REPLACE (dep) = NULL;
114 }
115 
116 /* Init DEP with the arguments.
117    While most of the scheduler (including targets) only needs the major type
118    of the dependency, it is convenient to hide the full dep_status from it.  */
119 void
120 init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
121 {
122   ds_t ds;
123 
124   if ((current_sched_info->flags & USE_DEPS_LIST))
125     ds = dk_to_ds (kind);
126   else
127     ds = 0;
128 
129   init_dep_1 (dep, pro, con, kind, ds);
130 }
131 
132 /* Make a copy of FROM in TO.  */
133 static void
134 copy_dep (dep_t to, dep_t from)
135 {
136   memcpy (to, from, sizeof (*to));
137 }
138 
139 static void dump_ds (FILE *, ds_t);
140 
141 /* Define flags for dump_dep ().  */
142 
143 /* Dump producer of the dependence.  */
144 #define DUMP_DEP_PRO (2)
145 
146 /* Dump consumer of the dependence.  */
147 #define DUMP_DEP_CON (4)
148 
149 /* Dump type of the dependence.  */
150 #define DUMP_DEP_TYPE (8)
151 
152 /* Dump status of the dependence.  */
153 #define DUMP_DEP_STATUS (16)
154 
155 /* Dump all information about the dependence.  */
156 #define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE	\
157 		      |DUMP_DEP_STATUS)
158 
159 /* Dump DEP to DUMP.
160    FLAGS is a bit mask specifying what information about DEP needs
161    to be printed.
162    If FLAGS has the very first bit set, then dump all information about DEP
163    and propagate this bit into the callee dump functions.  */
164 static void
165 dump_dep (FILE *dump, dep_t dep, int flags)
166 {
167   if (flags & 1)
168     flags |= DUMP_DEP_ALL;
169 
170   fprintf (dump, "<");
171 
172   if (flags & DUMP_DEP_PRO)
173     fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
174 
175   if (flags & DUMP_DEP_CON)
176     fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
177 
178   if (flags & DUMP_DEP_TYPE)
179     {
180       char t;
181       enum reg_note type = DEP_TYPE (dep);
182 
183       switch (type)
184 	{
185 	case REG_DEP_TRUE:
186 	  t = 't';
187 	  break;
188 
189 	case REG_DEP_OUTPUT:
190 	  t = 'o';
191 	  break;
192 
193 	case REG_DEP_CONTROL:
194 	  t = 'c';
195 	  break;
196 
197 	case REG_DEP_ANTI:
198 	  t = 'a';
199 	  break;
200 
201 	default:
202 	  gcc_unreachable ();
203 	  break;
204 	}
205 
206       fprintf (dump, "%c; ", t);
207     }
208 
209   if (flags & DUMP_DEP_STATUS)
210     {
211       if (current_sched_info->flags & USE_DEPS_LIST)
212 	dump_ds (dump, DEP_STATUS (dep));
213     }
214 
215   fprintf (dump, ">");
216 }
217 
218 /* Default flags for dump_dep ().  */
219 static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
220 
221 /* Dump all fields of DEP to STDERR.  */
222 void
223 sd_debug_dep (dep_t dep)
224 {
225   dump_dep (stderr, dep, 1);
226   fprintf (stderr, "\n");
227 }
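/* sd_debug_dep is intended to be called by hand from a debugger.  A sketch
   of such a session (the UIDs and output below are made up for
   illustration):

     (gdb) call sd_debug_dep (dep)
     <10; 12; t; >
*/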
228 
229 /* Determine whether DEP is a dependency link of a non-debug insn on a
230    debug insn.  */
231 
232 static inline bool
233 depl_on_debug_p (dep_link_t dep)
234 {
235   return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
236 	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
237 }
238 
239 /* Functions to operate with a single link from the dependencies lists -
240    dep_link_t.  */
241 
242 /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
243    PREV_NEXTP.  */
244 static void
245 attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
246 {
247   dep_link_t next = *prev_nextp;
248 
249   gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
250 	      && DEP_LINK_NEXT (l) == NULL);
251 
252   /* Init node being inserted.  */
253   DEP_LINK_PREV_NEXTP (l) = prev_nextp;
254   DEP_LINK_NEXT (l) = next;
255 
256   /* Fix next node.  */
257   if (next != NULL)
258     {
259       gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
260 
261       DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
262     }
263 
264   /* Fix prev node.  */
265   *prev_nextp = l;
266 }
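/* A sketch of the representation assumed above: a deps_list records only a
   pointer to its first link, and every link records the address of the
   pointer that refers to it (DEP_LINK_PREV_NEXTP).  Inserting link L at the
   head of list LST therefore reduces to

     attach_dep_link (l, &DEPS_LIST_FIRST (lst));

   which is exactly what add_to_deps_list below does.  */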
267 
268 /* Add dep_link LINK to deps_list L.  */
269 static void
270 add_to_deps_list (dep_link_t link, deps_list_t l)
271 {
272   attach_dep_link (link, &DEPS_LIST_FIRST (l));
273 
274   /* Don't count debug deps.  */
275   if (!depl_on_debug_p (link))
276     ++DEPS_LIST_N_LINKS (l);
277 }
278 
279 /* Detach dep_link L from the list.  */
280 static void
281 detach_dep_link (dep_link_t l)
282 {
283   dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
284   dep_link_t next = DEP_LINK_NEXT (l);
285 
286   *prev_nextp = next;
287 
288   if (next != NULL)
289     DEP_LINK_PREV_NEXTP (next) = prev_nextp;
290 
291   DEP_LINK_PREV_NEXTP (l) = NULL;
292   DEP_LINK_NEXT (l) = NULL;
293 }
294 
295 /* Remove link LINK from list LIST.  */
296 static void
297 remove_from_deps_list (dep_link_t link, deps_list_t list)
298 {
299   detach_dep_link (link);
300 
301   /* Don't count debug deps.  */
302   if (!depl_on_debug_p (link))
303     --DEPS_LIST_N_LINKS (list);
304 }
305 
306 /* Move link LINK from list FROM to list TO.  */
307 static void
308 move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
309 {
310   remove_from_deps_list (link, from);
311   add_to_deps_list (link, to);
312 }
313 
314 /* Return true if LINK is not attached to any list.  */
315 static bool
316 dep_link_is_detached_p (dep_link_t link)
317 {
318   return DEP_LINK_PREV_NEXTP (link) == NULL;
319 }
320 
321 /* Pool to hold all dependency nodes (dep_node_t).  */
322 static alloc_pool dn_pool;
323 
324 /* Number of dep_nodes out there.  */
325 static int dn_pool_diff = 0;
326 
327 /* Create a dep_node.  */
328 static dep_node_t
329 create_dep_node (void)
330 {
331   dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
332   dep_link_t back = DEP_NODE_BACK (n);
333   dep_link_t forw = DEP_NODE_FORW (n);
334 
335   DEP_LINK_NODE (back) = n;
336   DEP_LINK_NEXT (back) = NULL;
337   DEP_LINK_PREV_NEXTP (back) = NULL;
338 
339   DEP_LINK_NODE (forw) = n;
340   DEP_LINK_NEXT (forw) = NULL;
341   DEP_LINK_PREV_NEXTP (forw) = NULL;
342 
343   ++dn_pool_diff;
344 
345   return n;
346 }
347 
348 /* Delete dep_node N.  N must not be connected to any deps_list.  */
349 static void
350 delete_dep_node (dep_node_t n)
351 {
352   gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
353 	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));
354 
355   XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));
356 
357   --dn_pool_diff;
358 
359   pool_free (dn_pool, n);
360 }
361 
362 /* Pool to hold dependencies lists (deps_list_t).  */
363 static alloc_pool dl_pool;
364 
365 /* Number of deps_lists out there.  */
366 static int dl_pool_diff = 0;
367 
368 /* Functions to operate with dependences lists - deps_list_t.  */
369 
370 /* Return true if list L is empty.  */
371 static bool
372 deps_list_empty_p (deps_list_t l)
373 {
374   return DEPS_LIST_N_LINKS (l) == 0;
375 }
376 
377 /* Create a new deps_list.  */
378 static deps_list_t
379 create_deps_list (void)
380 {
381   deps_list_t l = (deps_list_t) pool_alloc (dl_pool);
382 
383   DEPS_LIST_FIRST (l) = NULL;
384   DEPS_LIST_N_LINKS (l) = 0;
385 
386   ++dl_pool_diff;
387   return l;
388 }
389 
390 /* Free deps_list L.  */
391 static void
392 free_deps_list (deps_list_t l)
393 {
394   gcc_assert (deps_list_empty_p (l));
395 
396   --dl_pool_diff;
397 
398   pool_free (dl_pool, l);
399 }
400 
401 /* Return true if there are no dep_nodes or deps_lists out there.
402    After the region is scheduled all the dependency nodes and lists
403    should [generally] be returned to the pool.  */
404 bool
405 deps_pools_are_empty_p (void)
406 {
407   return dn_pool_diff == 0 && dl_pool_diff == 0;
408 }
409 
410 /* Remove all elements from L.  */
411 static void
412 clear_deps_list (deps_list_t l)
413 {
414   do
415     {
416       dep_link_t link = DEPS_LIST_FIRST (l);
417 
418       if (link == NULL)
419 	break;
420 
421       remove_from_deps_list (link, l);
422     }
423   while (1);
424 }
425 
426 /* Decide whether a dependency should be treated as a hard or a speculative
427    dependency.  */
428 static bool
429 dep_spec_p (dep_t dep)
430 {
431   if (current_sched_info->flags & DO_SPECULATION)
432     {
433       if (DEP_STATUS (dep) & SPECULATIVE)
434 	return true;
435     }
436   if (current_sched_info->flags & DO_PREDICATION)
437     {
438       if (DEP_TYPE (dep) == REG_DEP_CONTROL)
439 	return true;
440     }
441   if (DEP_REPLACE (dep) != NULL)
442     return true;
443   return false;
444 }
445 
446 static regset reg_pending_sets;
447 static regset reg_pending_clobbers;
448 static regset reg_pending_uses;
449 static regset reg_pending_control_uses;
450 static enum reg_pending_barrier_mode reg_pending_barrier;
451 
452 /* Hard registers implicitly clobbered or used (or that may be implicitly
453    clobbered or used) by the currently analyzed insn.  For example, an
454    insn may have a constraint that names a single-register class.  Even
455    if there is currently no hard register in the insn, that particular
456    hard register will appear in the insn after the reload pass because
457    the constraint requires it.  */
458 static HARD_REG_SET implicit_reg_pending_clobbers;
459 static HARD_REG_SET implicit_reg_pending_uses;
460 
461 /* To speed up the test for duplicate dependency links we keep a
462    record of dependencies created by add_dependence when the average
463    number of instructions in a basic block is very large.
464 
465    Studies have shown that there is typically around 5 instructions between
466    branches for typical C code.  So we can make a guess that the average
467    basic block is approximately 5 instructions long; we will choose 100X
468    the average size as a very large basic block.
469 
470    Each insn has associated bitmaps for its dependencies.  Each bitmap
471    has enough entries to represent a dependency on any other insn in
472    the insn chain.  If the bitmap for the true-dependency cache is
473    allocated, then the remaining caches are allocated as well.  */
474 static bitmap_head *true_dependency_cache = NULL;
475 static bitmap_head *output_dependency_cache = NULL;
476 static bitmap_head *anti_dependency_cache = NULL;
477 static bitmap_head *control_dependency_cache = NULL;
478 static bitmap_head *spec_dependency_cache = NULL;
479 static int cache_size;
480 
481 /* True if we should mark added dependencies as non-register deps.  */
482 static bool mark_as_hard;
483 
484 static int deps_may_trap_p (const_rtx);
485 static void add_dependence_1 (rtx, rtx, enum reg_note);
486 static void add_dependence_list (rtx, rtx, int, enum reg_note, bool);
487 static void add_dependence_list_and_free (struct deps_desc *, rtx,
488 					  rtx *, int, enum reg_note, bool);
489 static void delete_all_dependences (rtx);
490 static void chain_to_prev_insn (rtx);
491 
492 static void flush_pending_lists (struct deps_desc *, rtx, int, int);
493 static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
494 static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
495 static void sched_analyze_insn (struct deps_desc *, rtx, rtx);
496 
497 static bool sched_has_condition_p (const_rtx);
498 static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
499 
500 static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
501 							  rtx, rtx);
502 static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
503 
504 #ifdef ENABLE_CHECKING
505 static void check_dep (dep_t, bool);
506 #endif
507 
508 /* Return nonzero if a load of the memory reference MEM can cause a trap.  */
509 
510 static int
511 deps_may_trap_p (const_rtx mem)
512 {
513   const_rtx addr = XEXP (mem, 0);
514 
515   if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
516     {
517       const_rtx t = get_reg_known_value (REGNO (addr));
518       if (t)
519 	addr = t;
520     }
521   return rtx_addr_can_trap_p (addr);
522 }
523 
524 
525 /* Find the condition under which INSN is executed.  If REV is not NULL,
526    it is set to TRUE when the returned comparison should be reversed
527    to get the actual condition.  */
528 static rtx
529 sched_get_condition_with_rev_uncached (const_rtx insn, bool *rev)
530 {
531   rtx pat = PATTERN (insn);
532   rtx src;
533 
534   if (rev)
535     *rev = false;
536 
537   if (GET_CODE (pat) == COND_EXEC)
538     return COND_EXEC_TEST (pat);
539 
540   if (!any_condjump_p (insn) || !onlyjump_p (insn))
541     return 0;
542 
543   src = SET_SRC (pc_set (insn));
544 
545   if (XEXP (src, 2) == pc_rtx)
546     return XEXP (src, 0);
547   else if (XEXP (src, 1) == pc_rtx)
548     {
549       rtx cond = XEXP (src, 0);
550       enum rtx_code revcode = reversed_comparison_code (cond, insn);
551 
552       if (revcode == UNKNOWN)
553 	return 0;
554 
555       if (rev)
556 	*rev = true;
557       return cond;
558     }
559 
560   return 0;
561 }
562 
563 /* Return the condition under which INSN does not execute (i.e.  the
564    not-taken condition for a conditional branch), or NULL if we cannot
565    find such a condition.  The caller should make a copy of the condition
566    before using it.  */
567 rtx
568 sched_get_reverse_condition_uncached (const_rtx insn)
569 {
570   bool rev;
571   rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
572   if (cond == NULL_RTX)
573     return cond;
574   if (!rev)
575     {
576       enum rtx_code revcode = reversed_comparison_code (cond, insn);
577       cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
578 			     XEXP (cond, 0),
579 			     XEXP (cond, 1));
580     }
581   return cond;
582 }
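/* An illustrative example (not taken from any particular target): for a
   conditional jump whose pattern is

     (set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
                             (label_ref 23)
                             (pc)))

   sched_get_condition_with_rev_uncached returns the (eq ...) rtx with *REV
   set to false, and sched_get_reverse_condition_uncached builds and returns
   the corresponding (ne (reg:CC 17) (const_int 0)).  */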
583 
584 /* Caching variant of sched_get_condition_with_rev_uncached.
585    We only do actual work the first time we come here for an insn; the
586    results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
587 static rtx
588 sched_get_condition_with_rev (const_rtx insn, bool *rev)
589 {
590   bool tmp;
591 
592   if (INSN_LUID (insn) == 0)
593     return sched_get_condition_with_rev_uncached (insn, rev);
594 
595   if (INSN_CACHED_COND (insn) == const_true_rtx)
596     return NULL_RTX;
597 
598   if (INSN_CACHED_COND (insn) != NULL_RTX)
599     {
600       if (rev)
601 	*rev = INSN_REVERSE_COND (insn);
602       return INSN_CACHED_COND (insn);
603     }
604 
605   INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
606   INSN_REVERSE_COND (insn) = tmp;
607 
608   if (INSN_CACHED_COND (insn) == NULL_RTX)
609     {
610       INSN_CACHED_COND (insn) = const_true_rtx;
611       return NULL_RTX;
612     }
613 
614   if (rev)
615     *rev = INSN_REVERSE_COND (insn);
616   return INSN_CACHED_COND (insn);
617 }
618 
619 /* True when we can find a condition under which INSN is executed.  */
620 static bool
621 sched_has_condition_p (const_rtx insn)
622 {
623   return !! sched_get_condition_with_rev (insn, NULL);
624 }
625 
626 
627 
628 /* Return nonzero if conditions COND1 and COND2 can never be both true.  */
629 static int
630 conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
631 {
632   if (COMPARISON_P (cond1)
633       && COMPARISON_P (cond2)
634       && GET_CODE (cond1) ==
635 	  (rev1==rev2
636 	  ? reversed_comparison_code (cond2, NULL)
637 	  : GET_CODE (cond2))
638       && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
639       && XEXP (cond1, 1) == XEXP (cond2, 1))
640     return 1;
641   return 0;
642 }
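/* For example (with REV1 == REV2 == false), the conditions

     (eq (reg:CC 17) (const_int 0))  and  (ne (reg:CC 17) (const_int 0))

   are mutually exclusive: reversed_comparison_code of the second is EQ,
   which matches the code of the first, and the operands are identical.  */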
643 
644 /* Return true if insn1 and insn2 can never depend on one another because
645    the conditions under which they are executed are mutually exclusive.  */
646 bool
647 sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
648 {
649   rtx cond1, cond2;
650   bool rev1 = false, rev2 = false;
651 
652   /* df doesn't handle conditional lifetimes entirely correctly;
653      calls mess up the conditional lifetimes.  */
654   if (!CALL_P (insn1) && !CALL_P (insn2))
655     {
656       cond1 = sched_get_condition_with_rev (insn1, &rev1);
657       cond2 = sched_get_condition_with_rev (insn2, &rev2);
658       if (cond1 && cond2
659 	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
660 	  /* Make sure first instruction doesn't affect condition of second
661 	     instruction if switched.  */
662 	  && !modified_in_p (cond1, insn2)
663 	  /* Make sure second instruction doesn't affect condition of first
664 	     instruction if switched.  */
665 	  && !modified_in_p (cond2, insn1))
666 	return true;
667     }
668   return false;
669 }
670 
671 
672 /* Return true if INSN can potentially be speculated with type DS.  */
673 bool
674 sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
675 {
676   if (HAS_INTERNAL_DEP (insn))
677     return false;
678 
679   if (!NONJUMP_INSN_P (insn))
680     return false;
681 
682   if (SCHED_GROUP_P (insn))
683     return false;
684 
685   if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
686     return false;
687 
688   if (side_effects_p (PATTERN (insn)))
689     return false;
690 
691   if (ds & BE_IN_SPEC)
692     /* The following instructions, which depend on a speculatively scheduled
693        instruction, cannot themselves be speculatively scheduled.  */
694     {
695       if (may_trap_or_fault_p (PATTERN (insn)))
696 	/* If instruction might fault, it cannot be speculatively scheduled.
697 	   For control speculation it's obvious why and for data speculation
698 	   it's because the insn might get wrong input if speculation
699 	   wasn't successful.  */
700 	return false;
701 
702       if ((ds & BE_IN_DATA)
703 	  && sched_has_condition_p (insn))
704 	/* If this is a predicated instruction, then it cannot be
705 	   speculatively scheduled.  See PR35659.  */
706 	return false;
707     }
708 
709   return true;
710 }
711 
712 /* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
713    initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
714    and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
715    This function is used to switch sd_iterator to the next list.
716    !!! For internal use only.  Might consider moving it to sched-int.h.  */
717 void
718 sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
719 	      deps_list_t *list_ptr, bool *resolved_p_ptr)
720 {
721   sd_list_types_def types = *types_ptr;
722 
723   if (types & SD_LIST_HARD_BACK)
724     {
725       *list_ptr = INSN_HARD_BACK_DEPS (insn);
726       *resolved_p_ptr = false;
727       *types_ptr = types & ~SD_LIST_HARD_BACK;
728     }
729   else if (types & SD_LIST_SPEC_BACK)
730     {
731       *list_ptr = INSN_SPEC_BACK_DEPS (insn);
732       *resolved_p_ptr = false;
733       *types_ptr = types & ~SD_LIST_SPEC_BACK;
734     }
735   else if (types & SD_LIST_FORW)
736     {
737       *list_ptr = INSN_FORW_DEPS (insn);
738       *resolved_p_ptr = false;
739       *types_ptr = types & ~SD_LIST_FORW;
740     }
741   else if (types & SD_LIST_RES_BACK)
742     {
743       *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
744       *resolved_p_ptr = true;
745       *types_ptr = types & ~SD_LIST_RES_BACK;
746     }
747   else if (types & SD_LIST_RES_FORW)
748     {
749       *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
750       *resolved_p_ptr = true;
751       *types_ptr = types & ~SD_LIST_RES_FORW;
752     }
753   else
754     {
755       *list_ptr = NULL;
756       *resolved_p_ptr = false;
757       *types_ptr = SD_LIST_NONE;
758     }
759 }
760 
761 /* Return the combined size of INSN's lists defined by LIST_TYPES.  */
762 int
763 sd_lists_size (const_rtx insn, sd_list_types_def list_types)
764 {
765   int size = 0;
766 
767   while (list_types != SD_LIST_NONE)
768     {
769       deps_list_t list;
770       bool resolved_p;
771 
772       sd_next_list (insn, &list_types, &list, &resolved_p);
773       if (list)
774 	size += DEPS_LIST_N_LINKS (list);
775     }
776 
777   return size;
778 }
779 
780 /* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
781 
782 bool
783 sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
784 {
785   while (list_types != SD_LIST_NONE)
786     {
787       deps_list_t list;
788       bool resolved_p;
789 
790       sd_next_list (insn, &list_types, &list, &resolved_p);
791       if (!deps_list_empty_p (list))
792 	return false;
793     }
794 
795   return true;
796 }
797 
798 /* Initialize data for INSN.  */
799 void
800 sd_init_insn (rtx insn)
801 {
802   INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
803   INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
804   INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
805   INSN_FORW_DEPS (insn) = create_deps_list ();
806   INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
807 
808   /* ??? It would be nice to allocate dependency caches here.  */
809 }
810 
811 /* Free data for INSN.  */
812 void
813 sd_finish_insn (rtx insn)
814 {
815   /* ??? It would be nice to deallocate dependency caches here.  */
816 
817   free_deps_list (INSN_HARD_BACK_DEPS (insn));
818   INSN_HARD_BACK_DEPS (insn) = NULL;
819 
820   free_deps_list (INSN_SPEC_BACK_DEPS (insn));
821   INSN_SPEC_BACK_DEPS (insn) = NULL;
822 
823   free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
824   INSN_RESOLVED_BACK_DEPS (insn) = NULL;
825 
826   free_deps_list (INSN_FORW_DEPS (insn));
827   INSN_FORW_DEPS (insn) = NULL;
828 
829   free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
830   INSN_RESOLVED_FORW_DEPS (insn) = NULL;
831 }
832 
833 /* Find a dependency between producer PRO and consumer CON.
834    Search through resolved dependency lists if RESOLVED_P is true.
835    If no such dependency is found return NULL,
836    otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
837    with an iterator pointing to it.  */
838 static dep_t
839 sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
840 			      sd_iterator_def *sd_it_ptr)
841 {
842   sd_list_types_def pro_list_type;
843   sd_list_types_def con_list_type;
844   sd_iterator_def sd_it;
845   dep_t dep;
846   bool found_p = false;
847 
848   if (resolved_p)
849     {
850       pro_list_type = SD_LIST_RES_FORW;
851       con_list_type = SD_LIST_RES_BACK;
852     }
853   else
854     {
855       pro_list_type = SD_LIST_FORW;
856       con_list_type = SD_LIST_BACK;
857     }
858 
859   /* Walk through either the back list of CON or the forw list of PRO,
860      depending on which one is shorter.  */
861   if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
862     {
863       /* Find the dep_link with producer PRO in consumer's back_deps.  */
864       FOR_EACH_DEP (con, con_list_type, sd_it, dep)
865 	if (DEP_PRO (dep) == pro)
866 	  {
867 	    found_p = true;
868 	    break;
869 	  }
870     }
871   else
872     {
873       /* Find the dep_link with consumer CON in producer's forw_deps.  */
874       FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
875 	if (DEP_CON (dep) == con)
876 	  {
877 	    found_p = true;
878 	    break;
879 	  }
880     }
881 
882   if (found_p)
883     {
884       if (sd_it_ptr != NULL)
885 	*sd_it_ptr = sd_it;
886 
887       return dep;
888     }
889 
890   return NULL;
891 }
892 
893 /* Find a dependency between producer PRO and consumer CON.
894    Use the dependency caches [if available] to check whether a dependency
895    is present at all.  Search through resolved dependency lists if
896    RESOLVED_P is true.  Return the dependency, or NULL if none is found.  */
897 dep_t
898 sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
899 {
900   if (true_dependency_cache != NULL)
901     /* Avoiding the list walk below can cut compile times dramatically
902        for some code.  */
903     {
904       int elem_luid = INSN_LUID (pro);
905       int insn_luid = INSN_LUID (con);
906 
907       if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
908 	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
909 	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
910 	  && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
911 	return NULL;
912     }
913 
914   return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
915 }
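/* A minimal usage sketch (PRO and CON stand for two analyzed insns; the
   names are ours, not part of the interface):

     dep_t dep = sd_find_dep_between (pro, con, false);

     if (dep != NULL)
       sd_debug_dep (dep);
*/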
916 
917 /* Add or update a dependence described by DEP.
918    MEM1 and MEM2, if non-null, correspond to memory locations in case of
919    data speculation.
920 
921    The function returns a value indicating if an old entry has been changed
922    or a new entry has been added to insn's backward deps.
923 
924    This function merely checks whether producer and consumer are the same
925    insn and doesn't create a dep in this case.  Actual manipulation of
926    dependence data structures is performed in add_or_update_dep_1.  */
927 static enum DEPS_ADJUST_RESULT
928 maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
929 {
930   rtx elem = DEP_PRO (dep);
931   rtx insn = DEP_CON (dep);
932 
933   gcc_assert (INSN_P (insn) && INSN_P (elem));
934 
935   /* Don't depend an insn on itself.  */
936   if (insn == elem)
937     {
938       if (sched_deps_info->generate_spec_deps)
939         /* INSN has an internal dependence, which we can't overcome.  */
940         HAS_INTERNAL_DEP (insn) = 1;
941 
942       return DEP_NODEP;
943     }
944 
945   return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
946 }
947 
948 /* Ask dependency caches what needs to be done for dependence DEP.
949    Return DEP_CREATED if new dependence should be created and there is no
950    need to try to find one searching the dependencies lists.
951    Return DEP_PRESENT if there already is a dependence described by DEP and
952    hence nothing is to be done.
953    Return DEP_CHANGED if there already is a dependence, but it should be
954    updated to incorporate additional information from DEP.  */
955 static enum DEPS_ADJUST_RESULT
956 ask_dependency_caches (dep_t dep)
957 {
958   int elem_luid = INSN_LUID (DEP_PRO (dep));
959   int insn_luid = INSN_LUID (DEP_CON (dep));
960 
961   gcc_assert (true_dependency_cache != NULL
962 	      && output_dependency_cache != NULL
963 	      && anti_dependency_cache != NULL
964 	      && control_dependency_cache != NULL);
965 
966   if (!(current_sched_info->flags & USE_DEPS_LIST))
967     {
968       enum reg_note present_dep_type;
969 
970       if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
971 	present_dep_type = REG_DEP_TRUE;
972       else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
973 	present_dep_type = REG_DEP_OUTPUT;
974       else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
975 	present_dep_type = REG_DEP_ANTI;
976       else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
977 	present_dep_type = REG_DEP_CONTROL;
978       else
979 	/* There is no existing dep so it should be created.  */
980 	return DEP_CREATED;
981 
982       if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
983 	/* DEP does not add anything to the existing dependence.  */
984 	return DEP_PRESENT;
985     }
986   else
987     {
988       ds_t present_dep_types = 0;
989 
990       if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
991 	present_dep_types |= DEP_TRUE;
992       if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
993 	present_dep_types |= DEP_OUTPUT;
994       if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
995 	present_dep_types |= DEP_ANTI;
996       if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
997 	present_dep_types |= DEP_CONTROL;
998 
999       if (present_dep_types == 0)
1000 	/* There is no existing dep so it should be created.  */
1001 	return DEP_CREATED;
1002 
1003       if (!(current_sched_info->flags & DO_SPECULATION)
1004 	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
1005 	{
1006 	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
1007 	      == present_dep_types)
1008 	    /* DEP does not add anything to the existing dependence.  */
1009 	    return DEP_PRESENT;
1010 	}
1011       else
1012 	{
1013 	  /* Only true dependencies can be data speculative and
1014 	     only anti dependencies can be control speculative.  */
1015 	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
1016 		      == present_dep_types);
1017 
1018 	  /* if (DEP is SPECULATIVE) then
1019 	     ..we should update DEP_STATUS
1020 	     else
1021 	     ..we should reset existing dep to non-speculative.  */
1022 	}
1023     }
1024 
1025   return DEP_CHANGED;
1026 }
1027 
1028 /* Set dependency caches according to DEP.  */
1029 static void
1030 set_dependency_caches (dep_t dep)
1031 {
1032   int elem_luid = INSN_LUID (DEP_PRO (dep));
1033   int insn_luid = INSN_LUID (DEP_CON (dep));
1034 
1035   if (!(current_sched_info->flags & USE_DEPS_LIST))
1036     {
1037       switch (DEP_TYPE (dep))
1038 	{
1039 	case REG_DEP_TRUE:
1040 	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1041 	  break;
1042 
1043 	case REG_DEP_OUTPUT:
1044 	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1045 	  break;
1046 
1047 	case REG_DEP_ANTI:
1048 	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1049 	  break;
1050 
1051 	case REG_DEP_CONTROL:
1052 	  bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1053 	  break;
1054 
1055 	default:
1056 	  gcc_unreachable ();
1057 	}
1058     }
1059   else
1060     {
1061       ds_t ds = DEP_STATUS (dep);
1062 
1063       if (ds & DEP_TRUE)
1064 	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1065       if (ds & DEP_OUTPUT)
1066 	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1067       if (ds & DEP_ANTI)
1068 	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1069       if (ds & DEP_CONTROL)
1070 	bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1071 
1072       if (ds & SPECULATIVE)
1073 	{
1074 	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
1075 	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
1076 	}
1077     }
1078 }
1079 
1080 /* The type of dependence DEP has changed from OLD_TYPE.  Update dependency
1081    caches accordingly.  */
1082 static void
1083 update_dependency_caches (dep_t dep, enum reg_note old_type)
1084 {
1085   int elem_luid = INSN_LUID (DEP_PRO (dep));
1086   int insn_luid = INSN_LUID (DEP_CON (dep));
1087 
1088   /* Clear corresponding cache entry because type of the link
1089      may have changed.  Keep them if we use_deps_list.  */
1090   if (!(current_sched_info->flags & USE_DEPS_LIST))
1091     {
1092       switch (old_type)
1093 	{
1094 	case REG_DEP_OUTPUT:
1095 	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1096 	  break;
1097 
1098 	case REG_DEP_ANTI:
1099 	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1100 	  break;
1101 
1102 	case REG_DEP_CONTROL:
1103 	  bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1104 	  break;
1105 
1106 	default:
1107 	  gcc_unreachable ();
1108 	}
1109     }
1110 
1111   set_dependency_caches (dep);
1112 }
1113 
1114 /* Convert a dependence pointed to by SD_IT to be non-speculative.  */
1115 static void
1116 change_spec_dep_to_hard (sd_iterator_def sd_it)
1117 {
1118   dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1119   dep_link_t link = DEP_NODE_BACK (node);
1120   dep_t dep = DEP_NODE_DEP (node);
1121   rtx elem = DEP_PRO (dep);
1122   rtx insn = DEP_CON (dep);
1123 
1124   move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
1125 
1126   DEP_STATUS (dep) &= ~SPECULATIVE;
1127 
1128   if (true_dependency_cache != NULL)
1129     /* Clear the cache entry.  */
1130     bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
1131 		      INSN_LUID (elem));
1132 }
1133 
1134 /* Update DEP to incorporate information from NEW_DEP.
1135    SD_IT points to DEP in case it should be moved to another list.
1136    MEM1 and MEM2, if nonnull, correspond to memory locations in case a
1137    data-speculative dependence should be updated.  */
1138 static enum DEPS_ADJUST_RESULT
1139 update_dep (dep_t dep, dep_t new_dep,
1140 	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
1141 	    rtx mem1 ATTRIBUTE_UNUSED,
1142 	    rtx mem2 ATTRIBUTE_UNUSED)
1143 {
1144   enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
1145   enum reg_note old_type = DEP_TYPE (dep);
1146   bool was_spec = dep_spec_p (dep);
1147 
1148   DEP_NONREG (dep) |= DEP_NONREG (new_dep);
1149   DEP_MULTIPLE (dep) = 1;
1150 
1151   /* If this is a more restrictive type of dependence than the
1152      existing one, then change the existing dependence to this
1153      type.  */
1154   if ((int) DEP_TYPE (new_dep) < (int) old_type)
1155     {
1156       DEP_TYPE (dep) = DEP_TYPE (new_dep);
1157       res = DEP_CHANGED;
1158     }
1159 
1160   if (current_sched_info->flags & USE_DEPS_LIST)
1161     /* Update DEP_STATUS.  */
1162     {
1163       ds_t dep_status = DEP_STATUS (dep);
1164       ds_t ds = DEP_STATUS (new_dep);
1165       ds_t new_status = ds | dep_status;
1166 
1167       if (new_status & SPECULATIVE)
1168 	{
1169 	  /* Either existing dep or a dep we're adding or both are
1170 	     speculative.  */
1171 	  if (!(ds & SPECULATIVE)
1172 	      || !(dep_status & SPECULATIVE))
1173 	    /* The new dep can't be speculative.  */
1174 	    new_status &= ~SPECULATIVE;
1175 	  else
1176 	    {
1177 	      /* Both are speculative.  Merge probabilities.  */
1178 	      if (mem1 != NULL)
1179 		{
1180 		  dw_t dw;
1181 
1182 		  dw = estimate_dep_weak (mem1, mem2);
1183 		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
1184 		}
1185 
1186 	      new_status = ds_merge (dep_status, ds);
1187 	    }
1188 	}
1189 
1190       ds = new_status;
1191 
1192       if (dep_status != ds)
1193 	{
1194 	  DEP_STATUS (dep) = ds;
1195 	  res = DEP_CHANGED;
1196 	}
1197     }
1198 
1199   if (was_spec && !dep_spec_p (dep))
1200     /* The old dep was speculative, but now it isn't.  */
1201     change_spec_dep_to_hard (sd_it);
1202 
1203   if (true_dependency_cache != NULL
1204       && res == DEP_CHANGED)
1205     update_dependency_caches (dep, old_type);
1206 
1207   return res;
1208 }
1209 
1210 /* Add or update a dependence described by DEP.
1211    MEM1 and MEM2, if non-null, correspond to memory locations in case of
1212    data speculation.
1213 
1214    The function returns a value indicating if an old entry has been changed
1215    or a new entry has been added to insn's backward deps or nothing has
1216    been updated at all.  */
1217 static enum DEPS_ADJUST_RESULT
1218 add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
1219 		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
1220 {
1221   bool maybe_present_p = true;
1222   bool present_p = false;
1223 
1224   gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
1225 	      && DEP_PRO (new_dep) != DEP_CON (new_dep));
1226 
1227 #ifdef ENABLE_CHECKING
1228   check_dep (new_dep, mem1 != NULL);
1229 #endif
1230 
1231   if (true_dependency_cache != NULL)
1232     {
1233       switch (ask_dependency_caches (new_dep))
1234 	{
1235 	case DEP_PRESENT:
1236 	  return DEP_PRESENT;
1237 
1238 	case DEP_CHANGED:
1239 	  maybe_present_p = true;
1240 	  present_p = true;
1241 	  break;
1242 
1243 	case DEP_CREATED:
1244 	  maybe_present_p = false;
1245 	  present_p = false;
1246 	  break;
1247 
1248 	default:
1249 	  gcc_unreachable ();
1250 	  break;
1251 	}
1252     }
1253 
1254   /* Check that we don't already have this dependence.  */
1255   if (maybe_present_p)
1256     {
1257       dep_t present_dep;
1258       sd_iterator_def sd_it;
1259 
1260       gcc_assert (true_dependency_cache == NULL || present_p);
1261 
1262       present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1263 						  DEP_CON (new_dep),
1264 						  resolved_p, &sd_it);
1265 
1266       if (present_dep != NULL)
1267 	/* We found an existing dependency between ELEM and INSN.  */
1268 	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
1269       else
1270 	/* We didn't find a dep, so it shouldn't be present in the cache.  */
1271 	gcc_assert (!present_p);
1272     }
1273 
1274   /* Might want to check one level of transitivity to save conses.
1275      This check should be done in maybe_add_or_update_dep_1.
1276      Since we made it to add_or_update_dep_1, we must create
1277      (or update) a link.  */
1278 
1279   if (mem1 != NULL_RTX)
1280     {
1281       gcc_assert (sched_deps_info->generate_spec_deps);
1282       DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
1283 					   estimate_dep_weak (mem1, mem2));
1284     }
1285 
1286   sd_add_dep (new_dep, resolved_p);
1287 
1288   return DEP_CREATED;
1289 }
1290 
1291 /* Initialize BACK_LIST_PTR with consumer's backward list and
1292    FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
1293    initialize with lists that hold resolved deps.  */
1294 static void
1295 get_back_and_forw_lists (dep_t dep, bool resolved_p,
1296 			 deps_list_t *back_list_ptr,
1297 			 deps_list_t *forw_list_ptr)
1298 {
1299   rtx con = DEP_CON (dep);
1300 
1301   if (!resolved_p)
1302     {
1303       if (dep_spec_p (dep))
1304 	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1305       else
1306 	*back_list_ptr = INSN_HARD_BACK_DEPS (con);
1307 
1308       *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1309     }
1310   else
1311     {
1312       *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1313       *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1314     }
1315 }
1316 
1317 /* Add dependence described by DEP.
1318    If RESOLVED_P is true treat the dependence as a resolved one.  */
1319 void
1320 sd_add_dep (dep_t dep, bool resolved_p)
1321 {
1322   dep_node_t n = create_dep_node ();
1323   deps_list_t con_back_deps;
1324   deps_list_t pro_forw_deps;
1325   rtx elem = DEP_PRO (dep);
1326   rtx insn = DEP_CON (dep);
1327 
1328   gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1329 
1330   if ((current_sched_info->flags & DO_SPECULATION) == 0
1331       || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
1332     DEP_STATUS (dep) &= ~SPECULATIVE;
1333 
1334   copy_dep (DEP_NODE_DEP (n), dep);
1335 
1336   get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
1337 
1338   add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
1339 
1340 #ifdef ENABLE_CHECKING
1341   check_dep (dep, false);
1342 #endif
1343 
1344   add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1345 
1346   /* If we are adding a dependency to INSN's LOG_LINKs, then note that
1347      in the bitmap caches of dependency information.  */
1348   if (true_dependency_cache != NULL)
1349     set_dependency_caches (dep);
1350 }
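/* A minimal sketch of creating a dependence by hand, in the style used
   elsewhere in this file (compare sd_copy_back_deps below); PRO and CON are
   assumed to be valid analyzed insns:

     dep_def _d, *d = &_d;

     init_dep (d, pro, con, REG_DEP_TRUE);
     sd_add_dep (d, false);
*/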
1351 
1352 /* Add or update a backward dependence described by DEP.
1353    If RESOLVED_P is true treat the dependence as a resolved one.
1354    This function is a convenience wrapper around add_or_update_dep_1.  */
1355 enum DEPS_ADJUST_RESULT
1356 sd_add_or_update_dep (dep_t dep, bool resolved_p)
1357 {
1358   return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
1359 }
1360 
1361 /* Resolve the dependence pointed to by SD_IT.
1362    SD_IT will advance to the next element.  */
1363 void
1364 sd_resolve_dep (sd_iterator_def sd_it)
1365 {
1366   dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1367   dep_t dep = DEP_NODE_DEP (node);
1368   rtx pro = DEP_PRO (dep);
1369   rtx con = DEP_CON (dep);
1370 
1371   if (dep_spec_p (dep))
1372     move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
1373 		   INSN_RESOLVED_BACK_DEPS (con));
1374   else
1375     move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
1376 		   INSN_RESOLVED_BACK_DEPS (con));
1377 
1378   move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
1379 		 INSN_RESOLVED_FORW_DEPS (pro));
1380 }
1381 
1382 /* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
1383    pointed to by SD_IT to unresolved state.  */
1384 void
1385 sd_unresolve_dep (sd_iterator_def sd_it)
1386 {
1387   dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1388   dep_t dep = DEP_NODE_DEP (node);
1389   rtx pro = DEP_PRO (dep);
1390   rtx con = DEP_CON (dep);
1391 
1392   if (dep_spec_p (dep))
1393     move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1394 		   INSN_SPEC_BACK_DEPS (con));
1395   else
1396     move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1397 		   INSN_HARD_BACK_DEPS (con));
1398 
1399   move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
1400 		 INSN_FORW_DEPS (pro));
1401 }
1402 
1403 /* Make TO depend on all the FROM's producers.
1404    If RESOLVED_P is true add dependencies to the resolved lists.  */
1405 void
1406 sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
1407 {
1408   sd_list_types_def list_type;
1409   sd_iterator_def sd_it;
1410   dep_t dep;
1411 
1412   list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
1413 
1414   FOR_EACH_DEP (from, list_type, sd_it, dep)
1415     {
1416       dep_def _new_dep, *new_dep = &_new_dep;
1417 
1418       copy_dep (new_dep, dep);
1419       DEP_CON (new_dep) = to;
1420       sd_add_dep (new_dep, resolved_p);
1421     }
1422 }
1423 
1424 /* Remove a dependency referred to by SD_IT.
1425    SD_IT will point to the next dependence after removal.  */
1426 void
1427 sd_delete_dep (sd_iterator_def sd_it)
1428 {
1429   dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
1430   dep_t dep = DEP_NODE_DEP (n);
1431   rtx pro = DEP_PRO (dep);
1432   rtx con = DEP_CON (dep);
1433   deps_list_t con_back_deps;
1434   deps_list_t pro_forw_deps;
1435 
1436   if (true_dependency_cache != NULL)
1437     {
1438       int elem_luid = INSN_LUID (pro);
1439       int insn_luid = INSN_LUID (con);
1440 
1441       bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
1442       bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1443       bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1444       bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1445 
1446       if (current_sched_info->flags & DO_SPECULATION)
1447 	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
1448     }
1449 
1450   get_back_and_forw_lists (dep, sd_it.resolved_p,
1451 			   &con_back_deps, &pro_forw_deps);
1452 
1453   remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
1454   remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1455 
1456   delete_dep_node (n);
1457 }
1458 
1459 /* Dump size of the lists.  */
1460 #define DUMP_LISTS_SIZE (2)
1461 
1462 /* Dump dependencies of the lists.  */
1463 #define DUMP_LISTS_DEPS (4)
1464 
1465 /* Dump all information about the lists.  */
1466 #define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1467 
1468 /* Dump deps_lists of INSN specified by TYPES to DUMP.
1469    FLAGS is a bit mask specifying what information about the lists needs
1470    to be printed.
1471    If FLAGS has the very first bit set, then dump all information about
1472    the lists and propagate this bit into the callee dump functions.  */
1473 static void
1474 dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
1475 {
1476   sd_iterator_def sd_it;
1477   dep_t dep;
1478   int all;
1479 
1480   all = (flags & 1);
1481 
1482   if (all)
1483     flags |= DUMP_LISTS_ALL;
1484 
1485   fprintf (dump, "[");
1486 
1487   if (flags & DUMP_LISTS_SIZE)
1488     fprintf (dump, "%d; ", sd_lists_size (insn, types));
1489 
1490   if (flags & DUMP_LISTS_DEPS)
1491     {
1492       FOR_EACH_DEP (insn, types, sd_it, dep)
1493 	{
1494 	  dump_dep (dump, dep, dump_dep_flags | all);
1495 	  fprintf (dump, " ");
1496 	}
1497     }
1498 }
1499 
1500 /* Dump all information about deps_lists of INSN specified by TYPES
1501    to STDERR.  */
1502 void
1503 sd_debug_lists (rtx insn, sd_list_types_def types)
1504 {
1505   dump_lists (stderr, insn, types, 1);
1506   fprintf (stderr, "\n");
1507 }
1508 
1509 /* A wrapper around add_dependence_1, to add a dependence of CON on
1510    PRO, with type DEP_TYPE.  This function implements special handling
1511    for REG_DEP_CONTROL dependencies.  For these, we optionally promote
1512    the type to REG_DEP_ANTI if we can determine that predication is
1513    impossible; otherwise we add additional true dependencies on the
1514    INSN_COND_DEPS list of the jump (which PRO must be).  */
1515 void
1516 add_dependence (rtx con, rtx pro, enum reg_note dep_type)
1517 {
1518   if (dep_type == REG_DEP_CONTROL
1519       && !(current_sched_info->flags & DO_PREDICATION))
1520     dep_type = REG_DEP_ANTI;
1521 
1522   /* A REG_DEP_CONTROL dependence may be eliminated through predication,
1523      so we must also make the insn dependent on the setter of the
1524      condition.  */
1525   if (dep_type == REG_DEP_CONTROL)
1526     {
1527       rtx real_pro = pro;
1528       rtx other = real_insn_for_shadow (real_pro);
1529       rtx cond;
1530 
1531       if (other != NULL_RTX)
1532 	real_pro = other;
1533       cond = sched_get_reverse_condition_uncached (real_pro);
1534       /* Verify that the insn does not use a different value in
1535 	 the condition register than the one that was present at
1536 	 the jump.  */
1537       if (cond == NULL_RTX)
1538 	dep_type = REG_DEP_ANTI;
1539       else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
1540 	{
1541 	  HARD_REG_SET uses;
1542 	  CLEAR_HARD_REG_SET (uses);
1543 	  note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
1544 	  if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
1545 	    dep_type = REG_DEP_ANTI;
1546 	}
1547       if (dep_type == REG_DEP_CONTROL)
1548 	{
1549 	  if (sched_verbose >= 5)
1550 	    fprintf (sched_dump, "making DEP_CONTROL for %d\n",
1551 		     INSN_UID (real_pro));
1552 	  add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
1553 			       REG_DEP_TRUE, false);
1554 	}
1555     }
1556 
1557   add_dependence_1 (con, pro, dep_type);
1558 }
1559 
1560 /* A convenience wrapper to operate on an entire list.  HARD should be
1561    true if DEP_NONREG should be set on newly created dependencies.  */
1562 
1563 static void
1564 add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type,
1565 		     bool hard)
1566 {
1567   mark_as_hard = hard;
1568   for (; list; list = XEXP (list, 1))
1569     {
1570       if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1571 	add_dependence (insn, XEXP (list, 0), dep_type);
1572     }
1573   mark_as_hard = false;
1574 }
1575 
1576 /* Similar, but free *LISTP at the same time, when the context
1577    is not readonly.  HARD should be true if DEP_NONREG should be set on
1578    newly created dependencies.  */
1579 
1580 static void
1581 add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
1582                               int uncond, enum reg_note dep_type, bool hard)
1583 {
1584   add_dependence_list (insn, *listp, uncond, dep_type, hard);
1585 
1586   /* We don't want to short-circuit dependencies involving debug
1587      insns, because they may cause actual dependencies to be
1588      disregarded.  */
1589   if (deps->readonly || DEBUG_INSN_P (insn))
1590     return;
1591 
1592   free_INSN_LIST_list (listp);
1593 }
1594 
1595 /* Remove all occurrences of INSN from LIST.  Return the number of
1596    occurrences removed.  */
1597 
1598 static int
1599 remove_from_dependence_list (rtx insn, rtx* listp)
1600 {
1601   int removed = 0;
1602 
1603   while (*listp)
1604     {
1605       if (XEXP (*listp, 0) == insn)
1606         {
1607           remove_free_INSN_LIST_node (listp);
1608           removed++;
1609           continue;
1610         }
1611 
1612       listp = &XEXP (*listp, 1);
1613     }
1614 
1615   return removed;
1616 }
1617 
1618 /* Same as above, but process two lists at once.  */
1619 static int
1620 remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
1621 {
1622   int removed = 0;
1623 
1624   while (*listp)
1625     {
1626       if (XEXP (*listp, 0) == insn)
1627         {
1628           remove_free_INSN_LIST_node (listp);
1629           remove_free_EXPR_LIST_node (exprp);
1630           removed++;
1631           continue;
1632         }
1633 
1634       listp = &XEXP (*listp, 1);
1635       exprp = &XEXP (*exprp, 1);
1636     }
1637 
1638   return removed;
1639 }
1640 
1641 /* Clear all dependencies for an insn.  */
1642 static void
1643 delete_all_dependences (rtx insn)
1644 {
1645   sd_iterator_def sd_it;
1646   dep_t dep;
1647 
1648   /* The loop below could be optimized to clear the caches and back_deps
1649      in one call, but that would duplicate code from
1650      sd_delete_dep ().  */
1651 
1652   for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1653        sd_iterator_cond (&sd_it, &dep);)
1654     sd_delete_dep (sd_it);
1655 }
1656 
1657 /* All insns in a scheduling group except the first should only have
1658    dependencies on the previous insn in the group.  So we find the
1659    first instruction in the scheduling group by walking the dependence
1660    chains backwards. Then we add the dependencies for the group to
1661    the previous nonnote insn.  */
1662 
1663 static void
1664 chain_to_prev_insn (rtx insn)
1665 {
1666   sd_iterator_def sd_it;
1667   dep_t dep;
1668   rtx prev_nonnote;
1669 
1670   FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1671     {
1672       rtx i = insn;
1673       rtx pro = DEP_PRO (dep);
1674 
1675       do
1676 	{
1677 	  i = prev_nonnote_insn (i);
1678 
1679 	  if (pro == i)
1680 	    goto next_link;
1681 	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));
1682 
1683       if (! sched_insns_conditions_mutex_p (i, pro))
1684 	add_dependence (i, pro, DEP_TYPE (dep));
1685     next_link:;
1686     }
1687 
1688   delete_all_dependences (insn);
1689 
1690   prev_nonnote = prev_nonnote_nondebug_insn (insn);
1691   if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1692       && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1693     add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1694 }
1695 
1696 /* Process an insn's memory dependencies.  There are four kinds of
1697    dependencies:
1698 
1699    (0) read dependence: read follows read
1700    (1) true dependence: read follows write
1701    (2) output dependence: write follows write
1702    (3) anti dependence: write follows read
1703 
1704    We are careful to build only dependencies which actually exist, and
1705    use transitivity to avoid building too many links.  */
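/* As an illustration (a sketch, not from any particular target), given

     (set (mem:SI (reg A)) (reg B))   ;; write #1
     (set (reg C) (mem:SI (reg A)))   ;; read: true dependence on write #1
     (set (mem:SI (reg A)) (reg D))   ;; write #2: output dependence on write #1,
                                      ;;           anti dependence on the read

   only the links that alias analysis cannot rule out are actually built.  */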
1706 
1707 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1708    The MEM is a memory reference contained within INSN, which we are saving
1709    so that we can do memory aliasing on it.  */
1710 
1711 static void
1712 add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
1713 			 rtx insn, rtx mem)
1714 {
1715   rtx *insn_list;
1716   rtx *mem_list;
1717   rtx link;
1718 
1719   gcc_assert (!deps->readonly);
1720   if (read_p)
1721     {
1722       insn_list = &deps->pending_read_insns;
1723       mem_list = &deps->pending_read_mems;
1724       if (!DEBUG_INSN_P (insn))
1725 	deps->pending_read_list_length++;
1726     }
1727   else
1728     {
1729       insn_list = &deps->pending_write_insns;
1730       mem_list = &deps->pending_write_mems;
1731       deps->pending_write_list_length++;
1732     }
1733 
1734   link = alloc_INSN_LIST (insn, *insn_list);
1735   *insn_list = link;
1736 
1737   if (sched_deps_info->use_cselib)
1738     {
1739       mem = shallow_copy_rtx (mem);
1740       XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
1741 							GET_MODE (mem), insn);
1742     }
1743   link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1744   *mem_list = link;
1745 }
1746 
1747 /* Make a dependency between every memory reference on the pending lists
1748    and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
1749    dependencies for a read operation, similarly with FOR_WRITE.  */
1750 
1751 static void
1752 flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
1753 		     int for_write)
1754 {
1755   if (for_write)
1756     {
1757       add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1758                                     1, REG_DEP_ANTI, true);
1759       if (!deps->readonly)
1760         {
1761           free_EXPR_LIST_list (&deps->pending_read_mems);
1762           deps->pending_read_list_length = 0;
1763         }
1764     }
1765 
1766   add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1767 				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1768 				true);
1769 
1770   add_dependence_list_and_free (deps, insn,
1771                                 &deps->last_pending_memory_flush, 1,
1772                                 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1773 				true);
1774 
1775   add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
1776 				REG_DEP_ANTI, true);
1777 
1778   if (DEBUG_INSN_P (insn))
1779     {
1780       if (for_write)
1781 	free_INSN_LIST_list (&deps->pending_read_insns);
1782       free_INSN_LIST_list (&deps->pending_write_insns);
1783       free_INSN_LIST_list (&deps->last_pending_memory_flush);
1784       free_INSN_LIST_list (&deps->pending_jump_insns);
1785     }
1786 
1787   if (!deps->readonly)
1788     {
1789       free_EXPR_LIST_list (&deps->pending_write_mems);
1790       deps->pending_write_list_length = 0;
1791 
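      /* Record INSN itself as the sole pending flush point, so that
	 later memory references depend on it instead of on the lists
	 that were just freed.  */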
1792       deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1793       deps->pending_flush_length = 1;
1794     }
1795   mark_as_hard = false;
1796 }
1797 
1798 /* The instruction whose dependencies we are currently analyzing.  */
1799 static rtx cur_insn = NULL_RTX;
1800 
1801 /* Implement hooks for haifa scheduler.  */
1802 
1803 static void
1804 haifa_start_insn (rtx insn)
1805 {
1806   gcc_assert (insn && !cur_insn);
1807 
1808   cur_insn = insn;
1809 }
1810 
1811 static void
1812 haifa_finish_insn (void)
1813 {
1814   cur_insn = NULL;
1815 }
1816 
1817 void
1818 haifa_note_reg_set (int regno)
1819 {
1820   SET_REGNO_REG_SET (reg_pending_sets, regno);
1821 }
1822 
1823 void
1824 haifa_note_reg_clobber (int regno)
1825 {
1826   SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1827 }
1828 
1829 void
1830 haifa_note_reg_use (int regno)
1831 {
1832   SET_REGNO_REG_SET (reg_pending_uses, regno);
1833 }
1834 
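/* Note a memory dependence of the insn currently being analyzed on
   PENDING_INSN, whose memory reference is PENDING_MEM; MEM is the
   reference in the current insn and DS is the dependence status.  For
   non-speculative dependencies the memory references are dropped.  */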
1835 static void
1836 haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
1837 {
1838   if (!(ds & SPECULATIVE))
1839     {
1840       mem = NULL_RTX;
1841       pending_mem = NULL_RTX;
1842     }
1843   else
1844     gcc_assert (ds & BEGIN_DATA);
1845 
1846   {
1847     dep_def _dep, *dep = &_dep;
1848 
1849     init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1850                 current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
1851     DEP_NONREG (dep) = 1;
1852     maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1853   }
1854 
1855 }
1856 
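/* Note a (non-memory) dependence of the insn currently being analyzed
   on ELEM, with dependence status DS.  */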
1857 static void
1858 haifa_note_dep (rtx elem, ds_t ds)
1859 {
1860   dep_def _dep;
1861   dep_t dep = &_dep;
1862 
1863   init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1864   if (mark_as_hard)
1865     DEP_NONREG (dep) = 1;
1866   maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1867 }
1868 
1869 static void
1870 note_reg_use (int r)
1871 {
1872   if (sched_deps_info->note_reg_use)
1873     sched_deps_info->note_reg_use (r);
1874 }
1875 
1876 static void
1877 note_reg_set (int r)
1878 {
1879   if (sched_deps_info->note_reg_set)
1880     sched_deps_info->note_reg_set (r);
1881 }
1882 
1883 static void
1884 note_reg_clobber (int r)
1885 {
1886   if (sched_deps_info->note_reg_clobber)
1887     sched_deps_info->note_reg_clobber (r);
1888 }
1889 
1890 static void
1891 note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
1892 {
1893   if (sched_deps_info->note_mem_dep)
1894     sched_deps_info->note_mem_dep (m1, m2, e, ds);
1895 }
1896 
1897 static void
1898 note_dep (rtx e, ds_t ds)
1899 {
1900   if (sched_deps_info->note_dep)
1901     sched_deps_info->note_dep (e, ds);
1902 }
1903 
1904 /* Return the reg_note corresponding to DS.  */
1905 enum reg_note
1906 ds_to_dt (ds_t ds)
1907 {
1908   if (ds & DEP_TRUE)
1909     return REG_DEP_TRUE;
1910   else if (ds & DEP_OUTPUT)
1911     return REG_DEP_OUTPUT;
1912   else if (ds & DEP_ANTI)
1913     return REG_DEP_ANTI;
1914   else
1915     {
1916       gcc_assert (ds & DEP_CONTROL);
1917       return REG_DEP_CONTROL;
1918     }
1919 }
1920 
1921 
1922 
1923 /* Functions for computation of info needed for register pressure
1924    sensitive insn scheduling.  */
1925 
1926 
1927 /* Allocate and return reg_use_data structure for REGNO and INSN.  */
1928 static struct reg_use_data *
1929 create_insn_reg_use (int regno, rtx insn)
1930 {
1931   struct reg_use_data *use;
1932 
1933   use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1934   use->regno = regno;
1935   use->insn = insn;
1936   use->next_insn_use = INSN_REG_USE_LIST (insn);
1937   INSN_REG_USE_LIST (insn) = use;
1938   return use;
1939 }
1940 
1941 /* Allocate and return reg_set_data structure for REGNO and INSN.  */
1942 static struct reg_set_data *
1943 create_insn_reg_set (int regno, rtx insn)
1944 {
1945   struct reg_set_data *set;
1946 
1947   set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1948   set->regno = regno;
1949   set->insn = insn;
1950   set->next_insn_set = INSN_REG_SET_LIST (insn);
1951   INSN_REG_SET_LIST (insn) = set;
1952   return set;
1953 }
1954 
1955 /* Set up insn register uses for INSN and dependency context DEPS.  */
1956 static void
1957 setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
1958 {
1959   unsigned i;
1960   reg_set_iterator rsi;
1961   rtx list;
1962   struct reg_use_data *use, *use2, *next;
1963   struct deps_reg *reg_last;
1964 
1965   EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1966     {
1967       if (i < FIRST_PSEUDO_REGISTER
1968 	  && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1969 	continue;
1970 
1971       if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1972 	  && ! REGNO_REG_SET_P (reg_pending_sets, i)
1973 	  && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1974 	/* Ignore use which is not dying.  */
1975 	continue;
1976 
1977       use = create_insn_reg_use (i, insn);
1978       use->next_regno_use = use;
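      /* Start with a singleton circular list; uses of this register by
	 earlier insns are spliced in just below.  */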
1979       reg_last = &deps->reg_last[i];
1980 
1981       /* Create the cycle list of uses.  */
1982       for (list = reg_last->uses; list; list = XEXP (list, 1))
1983 	{
1984 	  use2 = create_insn_reg_use (i, XEXP (list, 0));
1985 	  next = use->next_regno_use;
1986 	  use->next_regno_use = use2;
1987 	  use2->next_regno_use = next;
1988 	}
1989     }
1990 }
1991 
1992 /* Register pressure info for the currently processed insn.  */
1993 static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1994 
1995 /* Return TRUE if INSN has the use structure for REGNO.  */
1996 static bool
1997 insn_use_p (rtx insn, int regno)
1998 {
1999   struct reg_use_data *use;
2000 
2001   for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2002     if (use->regno == regno)
2003       return true;
2004   return false;
2005 }
2006 
2007 /* Update the register pressure info after birth of pseudo register REGNO
2008    in INSN.  Arguments CLOBBER_P and UNUSED_P say, respectively, whether
2009    the register is clobbered or unused after the insn.  */
2010 static void
2011 mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
2012 {
2013   int incr, new_incr;
2014   enum reg_class cl;
2015 
2016   gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2017   cl = sched_regno_pressure_class[regno];
2018   if (cl != NO_REGS)
2019     {
2020       incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2021       if (clobber_p)
2022 	{
2023 	  new_incr = reg_pressure_info[cl].clobber_increase + incr;
2024 	  reg_pressure_info[cl].clobber_increase = new_incr;
2025 	}
2026       else if (unused_p)
2027 	{
2028 	  new_incr = reg_pressure_info[cl].unused_set_increase + incr;
2029 	  reg_pressure_info[cl].unused_set_increase = new_incr;
2030 	}
2031       else
2032 	{
2033 	  new_incr = reg_pressure_info[cl].set_increase + incr;
2034 	  reg_pressure_info[cl].set_increase = new_incr;
2035 	  if (! insn_use_p (insn, regno))
2036 	    reg_pressure_info[cl].change += incr;
2037 	  create_insn_reg_set (regno, insn);
2038 	}
2039       gcc_assert (new_incr < (1 << INCREASE_BITS));
2040     }
2041 }
2042 
2043 /* Like mark_insn_pseudo_birth, except that NREGS says how many hard
2044    registers are involved in the birth.  */
2045 static void
2046 mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
2047 			    bool clobber_p, bool unused_p)
2048 {
2049   enum reg_class cl;
2050   int new_incr, last = regno + nregs;
2051 
2052   while (regno < last)
2053     {
2054       gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2055       if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2056 	{
2057 	  cl = sched_regno_pressure_class[regno];
2058 	  if (cl != NO_REGS)
2059 	    {
2060 	      if (clobber_p)
2061 		{
2062 		  new_incr = reg_pressure_info[cl].clobber_increase + 1;
2063 		  reg_pressure_info[cl].clobber_increase = new_incr;
2064 		}
2065 	      else if (unused_p)
2066 		{
2067 		  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
2068 		  reg_pressure_info[cl].unused_set_increase = new_incr;
2069 		}
2070 	      else
2071 		{
2072 		  new_incr = reg_pressure_info[cl].set_increase + 1;
2073 		  reg_pressure_info[cl].set_increase = new_incr;
2074 		  if (! insn_use_p (insn, regno))
2075 		    reg_pressure_info[cl].change += 1;
2076 		  create_insn_reg_set (regno, insn);
2077 		}
2078 	      gcc_assert (new_incr < (1 << INCREASE_BITS));
2079 	    }
2080 	}
2081       regno++;
2082     }
2083 }
2084 
2085 /* Update the register pressure info after birth of pseudo or hard
2086    register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say,
2087    respectively, whether the register is clobbered or unused after the
2088    insn.  */
2089 static void
2090 mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2091 {
2092   int regno;
2093 
2094   if (GET_CODE (reg) == SUBREG)
2095     reg = SUBREG_REG (reg);
2096 
2097   if (! REG_P (reg))
2098     return;
2099 
2100   regno = REGNO (reg);
2101   if (regno < FIRST_PSEUDO_REGISTER)
2102     mark_insn_hard_regno_birth (insn, regno,
2103 				hard_regno_nregs[regno][GET_MODE (reg)],
2104 				clobber_p, unused_p);
2105   else
2106     mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2107 }
2108 
2109 /* Update the register pressure info after death of pseudo register
2110    REGNO.  */
2111 static void
2112 mark_pseudo_death (int regno)
2113 {
2114   int incr;
2115   enum reg_class cl;
2116 
2117   gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2118   cl = sched_regno_pressure_class[regno];
2119   if (cl != NO_REGS)
2120     {
2121       incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2122       reg_pressure_info[cl].change -= incr;
2123     }
2124 }
2125 
2126 /* Like mark_pseudo_death, except that NREGS says how many hard
2127    registers are involved in the death.  */
2128 static void
2129 mark_hard_regno_death (int regno, int nregs)
2130 {
2131   enum reg_class cl;
2132   int last = regno + nregs;
2133 
2134   while (regno < last)
2135     {
2136       gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2137       if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2138 	{
2139 	  cl = sched_regno_pressure_class[regno];
2140 	  if (cl != NO_REGS)
2141 	    reg_pressure_info[cl].change -= 1;
2142 	}
2143       regno++;
2144     }
2145 }
2146 
2147 /* Update the register pressure info after death of pseudo or hard
2148    register REG.  */
2149 static void
2150 mark_reg_death (rtx reg)
2151 {
2152   int regno;
2153 
2154   if (GET_CODE (reg) == SUBREG)
2155     reg = SUBREG_REG (reg);
2156 
2157   if (! REG_P (reg))
2158     return;
2159 
2160   regno = REGNO (reg);
2161   if (regno < FIRST_PSEUDO_REGISTER)
2162     mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
2163   else
2164     mark_pseudo_death (regno);
2165 }
2166 
2167 /* Process SETTER of REG.  DATA is an insn containing the setter.  */
2168 static void
2169 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2170 {
2171   if (setter != NULL_RTX && GET_CODE (setter) != SET)
2172     return;
2173   mark_insn_reg_birth
2174     ((rtx) data, reg, false,
2175      find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2176 }
2177 
2178 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
2179 static void
2180 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2181 {
2182   if (GET_CODE (setter) == CLOBBER)
2183     mark_insn_reg_birth ((rtx) data, reg, true, false);
2184 }
2185 
2186 /* Set up reg pressure info related to INSN.  */
2187 void
2188 init_insn_reg_pressure_info (rtx insn)
2189 {
2190   int i, len;
2191   enum reg_class cl;
2192   static struct reg_pressure_data *pressure_info;
2193   rtx link;
2194 
2195   gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
2196 
2197   if (! INSN_P (insn))
2198     return;
2199 
2200   for (i = 0; i < ira_pressure_classes_num; i++)
2201     {
2202       cl = ira_pressure_classes[i];
2203       reg_pressure_info[cl].clobber_increase = 0;
2204       reg_pressure_info[cl].set_increase = 0;
2205       reg_pressure_info[cl].unused_set_increase = 0;
2206       reg_pressure_info[cl].change = 0;
2207     }
2208 
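  /* Scan the stores twice: the first pass records only CLOBBERs and
     the second only SETs (each callback ignores the other kind).  */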
2209   note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2210 
2211   note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2212 
2213 #ifdef AUTO_INC_DEC
2214   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2215     if (REG_NOTE_KIND (link) == REG_INC)
2216       mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2217 #endif
2218 
2219   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2220     if (REG_NOTE_KIND (link) == REG_DEAD)
2221       mark_reg_death (XEXP (link, 0));
2222 
2223   len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
2224   pressure_info
2225     = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2226   if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2227     INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2228 						    * sizeof (int), 1);
2229   for (i = 0; i < ira_pressure_classes_num; i++)
2230     {
2231       cl = ira_pressure_classes[i];
2232       pressure_info[i].clobber_increase
2233 	= reg_pressure_info[cl].clobber_increase;
2234       pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2235       pressure_info[i].unused_set_increase
2236 	= reg_pressure_info[cl].unused_set_increase;
2237       pressure_info[i].change = reg_pressure_info[cl].change;
2238     }
2239 }
2240 
2241 
2242 
2243 
2244 /* Internal variable for sched_analyze_[12] () functions.
2245    If it is nonzero, sched_analyze_[12] is currently examining
2246    the topmost SET of the insn.  */
2247 static bool can_start_lhs_rhs_p;
2248 
2249 /* Extend reg info for the deps context DEPS given that
2250    we have just generated a register numbered REGNO.  */
2251 static void
2252 extend_deps_reg_info (struct deps_desc *deps, int regno)
2253 {
2254   int max_regno = regno + 1;
2255 
2256   gcc_assert (!reload_completed);
2257 
2258   /* In a readonly context, it would not hurt to extend info,
2259      but it should not be needed.  */
2260   if (reload_completed && deps->readonly)
2261     {
2262       deps->max_reg = max_regno;
2263       return;
2264     }
2265 
2266   if (max_regno > deps->max_reg)
2267     {
2268       deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2269                                    max_regno);
2270       memset (&deps->reg_last[deps->max_reg],
2271               0, (max_regno - deps->max_reg)
2272               * sizeof (struct deps_reg));
2273       deps->max_reg = max_regno;
2274     }
2275 }
2276 
2277 /* Extends REG_INFO_P if needed.  */
2278 void
2279 maybe_extend_reg_info_p (void)
2280 {
2281   /* Extend REG_INFO_P, if needed.  */
2282   if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2283     {
2284       size_t new_reg_info_p_size = max_regno + 128;
2285 
2286       gcc_assert (!reload_completed && sel_sched_p ());
2287 
2288       reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2289                                                     new_reg_info_p_size,
2290                                                     reg_info_p_size,
2291                                                     sizeof (*reg_info_p));
2292       reg_info_p_size = new_reg_info_p_size;
2293     }
2294 }
2295 
2296 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2297    The type of the reference is specified by REF and can be SET,
2298    CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */
2299 
2300 static void
2301 sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
2302 		   enum rtx_code ref, rtx insn)
2303 {
2304   /* We could emit new pseudos in renaming.  Extend the reg structures.  */
2305   if (!reload_completed && sel_sched_p ()
2306       && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2307     extend_deps_reg_info (deps, regno);
2308 
2309   maybe_extend_reg_info_p ();
2310 
2311   /* A hard reg in a wide mode may really be multiple registers.
2312      If so, mark all of them just like the first.  */
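  /* For example, a DImode value held in 32-bit hard registers spans
     hard_regno_nregs consecutive registers, each of which is noted below
     (illustrative; the exact count is target-specific).  */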
2313   if (regno < FIRST_PSEUDO_REGISTER)
2314     {
2315       int i = hard_regno_nregs[regno][mode];
2316       if (ref == SET)
2317 	{
2318 	  while (--i >= 0)
2319 	    note_reg_set (regno + i);
2320 	}
2321       else if (ref == USE)
2322 	{
2323 	  while (--i >= 0)
2324 	    note_reg_use (regno + i);
2325 	}
2326       else
2327 	{
2328 	  while (--i >= 0)
2329 	    note_reg_clobber (regno + i);
2330 	}
2331     }
2332 
2333   /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2334      it does not reload.  Ignore these as they have served their
2335      purpose already.  */
2336   else if (regno >= deps->max_reg)
2337     {
2338       enum rtx_code code = GET_CODE (PATTERN (insn));
2339       gcc_assert (code == USE || code == CLOBBER);
2340     }
2341 
2342   else
2343     {
2344       if (ref == SET)
2345 	note_reg_set (regno);
2346       else if (ref == USE)
2347 	note_reg_use (regno);
2348       else
2349 	note_reg_clobber (regno);
2350 
2351       /* Pseudos that are REG_EQUIV to something may be replaced
2352 	 by that during reloading.  We need only add dependencies for
2353 	the address in the REG_EQUIV note.  */
2354       if (!reload_completed && get_reg_known_equiv_p (regno))
2355 	{
2356 	  rtx t = get_reg_known_value (regno);
2357 	  if (MEM_P (t))
2358 	    sched_analyze_2 (deps, XEXP (t, 0), insn);
2359 	}
2360 
2361       /* Don't let it cross a call after scheduling if it doesn't
2362 	 already cross one.  */
2363       if (REG_N_CALLS_CROSSED (regno) == 0)
2364 	{
2365 	  if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2366 	    deps->sched_before_next_call
2367 	      = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2368 	  else
2369 	    add_dependence_list (insn, deps->last_function_call, 1,
2370 				 REG_DEP_ANTI, false);
2371 	}
2372     }
2373 }
2374 
2375 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2376    rtx, X, creating all dependencies generated by the write to the
2377    destination of X, and reads of everything mentioned.  */
2378 
2379 static void
2380 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
2381 {
2382   rtx dest = XEXP (x, 0);
2383   enum rtx_code code = GET_CODE (x);
2384   bool cslr_p = can_start_lhs_rhs_p;
2385 
2386   can_start_lhs_rhs_p = false;
2387 
2388   gcc_assert (dest);
2389   if (dest == 0)
2390     return;
2391 
2392   if (cslr_p && sched_deps_info->start_lhs)
2393     sched_deps_info->start_lhs (dest);
2394 
2395   if (GET_CODE (dest) == PARALLEL)
2396     {
2397       int i;
2398 
2399       for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2400 	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2401 	  sched_analyze_1 (deps,
2402 			   gen_rtx_CLOBBER (VOIDmode,
2403 					    XEXP (XVECEXP (dest, 0, i), 0)),
2404 			   insn);
2405 
2406       if (cslr_p && sched_deps_info->finish_lhs)
2407 	sched_deps_info->finish_lhs ();
2408 
2409       if (code == SET)
2410 	{
2411 	  can_start_lhs_rhs_p = cslr_p;
2412 
2413 	  sched_analyze_2 (deps, SET_SRC (x), insn);
2414 
2415 	  can_start_lhs_rhs_p = false;
2416 	}
2417 
2418       return;
2419     }
2420 
2421   while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2422 	 || GET_CODE (dest) == ZERO_EXTRACT)
2423     {
2424       if (GET_CODE (dest) == STRICT_LOW_PART
2425 	 || GET_CODE (dest) == ZERO_EXTRACT
2426 	 || df_read_modify_subreg_p (dest))
2427         {
2428 	  /* These both read and modify the result.  We must handle
2429              them as writes to get proper dependencies for following
2430              instructions.  We must handle them as reads to get proper
2431              dependencies from this to previous instructions.
2432              Thus we need to call sched_analyze_2.  */
2433 
2434 	  sched_analyze_2 (deps, XEXP (dest, 0), insn);
2435 	}
2436       if (GET_CODE (dest) == ZERO_EXTRACT)
2437 	{
2438 	  /* The second and third arguments are values read by this insn.  */
2439 	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
2440 	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
2441 	}
2442       dest = XEXP (dest, 0);
2443     }
2444 
2445   if (REG_P (dest))
2446     {
2447       int regno = REGNO (dest);
2448       enum machine_mode mode = GET_MODE (dest);
2449 
2450       sched_analyze_reg (deps, regno, mode, code, insn);
2451 
2452 #ifdef STACK_REGS
2453       /* Treat all writes to a stack register as modifying the TOS.  */
2454       if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2455 	{
2456 	  /* Avoid analyzing the same register twice.  */
2457 	  if (regno != FIRST_STACK_REG)
2458 	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2459 
2460 	  add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2461 			       FIRST_STACK_REG);
2462 	}
2463 #endif
2464     }
2465   else if (MEM_P (dest))
2466     {
2467       /* Writing memory.  */
2468       rtx t = dest;
2469 
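      /* With cselib, substitute value rtxes into the address so that
	 equivalent addresses are recognized as such by the alias
	 machinery before the address is canonicalized below.  */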
2470       if (sched_deps_info->use_cselib)
2471 	{
2472 	  enum machine_mode address_mode = get_address_mode (dest);
2473 
2474 	  t = shallow_copy_rtx (dest);
2475 	  cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2476 				   GET_MODE (t), insn);
2477 	  XEXP (t, 0)
2478 	    = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2479 						insn);
2480 	}
2481       t = canon_rtx (t);
2482 
2483       /* Pending lists can't get larger with a readonly context.  */
2484       if (!deps->readonly
2485           && ((deps->pending_read_list_length + deps->pending_write_list_length)
2486               > MAX_PENDING_LIST_LENGTH))
2487 	{
2488 	  /* Flush all pending reads and writes to prevent the pending lists
2489 	     from getting any larger.  Insn scheduling runs too slowly when
2490 	     these lists get long.  When compiling GCC with itself,
2491 	     this flush occurs 8 times for sparc, and 10 times for m88k using
2492 	     the default value of 32.  */
2493 	  flush_pending_lists (deps, insn, false, true);
2494 	}
2495       else
2496 	{
2497 	  rtx pending, pending_mem;
2498 
2499 	  pending = deps->pending_read_insns;
2500 	  pending_mem = deps->pending_read_mems;
2501 	  while (pending)
2502 	    {
2503 	      if (anti_dependence (XEXP (pending_mem, 0), t)
2504 		  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2505 		note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2506 			      DEP_ANTI);
2507 
2508 	      pending = XEXP (pending, 1);
2509 	      pending_mem = XEXP (pending_mem, 1);
2510 	    }
2511 
2512 	  pending = deps->pending_write_insns;
2513 	  pending_mem = deps->pending_write_mems;
2514 	  while (pending)
2515 	    {
2516 	      if (output_dependence (XEXP (pending_mem, 0), t)
2517 		  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2518 		note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2519 			      DEP_OUTPUT);
2520 
2521 	      pending = XEXP (pending, 1);
2522 	      pending_mem = XEXP (pending_mem, 1);
2523 	    }
2524 
2525 	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2526 			       REG_DEP_ANTI, true);
2527 	  add_dependence_list (insn, deps->pending_jump_insns, 1,
2528 			       REG_DEP_CONTROL, true);
2529 
2530           if (!deps->readonly)
2531             add_insn_mem_dependence (deps, false, insn, dest);
2532 	}
2533       sched_analyze_2 (deps, XEXP (dest, 0), insn);
2534     }
2535 
2536   if (cslr_p && sched_deps_info->finish_lhs)
2537     sched_deps_info->finish_lhs ();
2538 
2539   /* Analyze reads.  */
2540   if (GET_CODE (x) == SET)
2541     {
2542       can_start_lhs_rhs_p = cslr_p;
2543 
2544       sched_analyze_2 (deps, SET_SRC (x), insn);
2545 
2546       can_start_lhs_rhs_p = false;
2547     }
2548 }
2549 
2550 /* Analyze the uses of memory and registers in rtx X in INSN.  */
2551 static void
2552 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
2553 {
2554   int i;
2555   int j;
2556   enum rtx_code code;
2557   const char *fmt;
2558   bool cslr_p = can_start_lhs_rhs_p;
2559 
2560   can_start_lhs_rhs_p = false;
2561 
2562   gcc_assert (x);
2563   if (x == 0)
2564     return;
2565 
2566   if (cslr_p && sched_deps_info->start_rhs)
2567     sched_deps_info->start_rhs (x);
2568 
2569   code = GET_CODE (x);
2570 
2571   switch (code)
2572     {
2573     CASE_CONST_ANY:
2574     case SYMBOL_REF:
2575     case CONST:
2576     case LABEL_REF:
2577       /* Ignore constants.  */
2578       if (cslr_p && sched_deps_info->finish_rhs)
2579 	sched_deps_info->finish_rhs ();
2580 
2581       return;
2582 
2583 #ifdef HAVE_cc0
2584     case CC0:
2585       /* User of CC0 depends on immediately preceding insn.  */
2586       SCHED_GROUP_P (insn) = 1;
2587        /* Don't move the CC0 setter to another block (it can set up the
2588         same flag for previous CC0 users, which is safe).  */
2589       CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2590 
2591       if (cslr_p && sched_deps_info->finish_rhs)
2592 	sched_deps_info->finish_rhs ();
2593 
2594       return;
2595 #endif
2596 
2597     case REG:
2598       {
2599 	int regno = REGNO (x);
2600 	enum machine_mode mode = GET_MODE (x);
2601 
2602 	sched_analyze_reg (deps, regno, mode, USE, insn);
2603 
2604 #ifdef STACK_REGS
2605       /* Treat all reads of a stack register as modifying the TOS.  */
2606       if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2607 	{
2608 	  /* Avoid analyzing the same register twice.  */
2609 	  if (regno != FIRST_STACK_REG)
2610 	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2611 	  sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2612 	}
2613 #endif
2614 
2615 	if (cslr_p && sched_deps_info->finish_rhs)
2616 	  sched_deps_info->finish_rhs ();
2617 
2618 	return;
2619       }
2620 
2621     case MEM:
2622       {
2623 	/* Reading memory.  */
2624 	rtx u;
2625 	rtx pending, pending_mem;
2626 	rtx t = x;
2627 
2628 	if (sched_deps_info->use_cselib)
2629 	  {
2630 	    enum machine_mode address_mode = get_address_mode (t);
2631 
2632 	    t = shallow_copy_rtx (t);
2633 	    cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2634 				     GET_MODE (t), insn);
2635 	    XEXP (t, 0)
2636 	      = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2637 						  insn);
2638 	  }
2639 
2640 	if (!DEBUG_INSN_P (insn))
2641 	  {
2642 	    t = canon_rtx (t);
2643 	    pending = deps->pending_read_insns;
2644 	    pending_mem = deps->pending_read_mems;
2645 	    while (pending)
2646 	      {
2647 		if (read_dependence (XEXP (pending_mem, 0), t)
2648 		    && ! sched_insns_conditions_mutex_p (insn,
2649 							 XEXP (pending, 0)))
2650 		  note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2651 				DEP_ANTI);
2652 
2653 		pending = XEXP (pending, 1);
2654 		pending_mem = XEXP (pending_mem, 1);
2655 	      }
2656 
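	    /* A read after a pending write is a true dependence; when
	       speculative dependencies are being generated, it is also
	       marked as a candidate for data speculation (BEGIN_DATA).  */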
2657 	    pending = deps->pending_write_insns;
2658 	    pending_mem = deps->pending_write_mems;
2659 	    while (pending)
2660 	      {
2661 		if (true_dependence (XEXP (pending_mem, 0), VOIDmode, t)
2662 		    && ! sched_insns_conditions_mutex_p (insn,
2663 							 XEXP (pending, 0)))
2664 		  note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2665 				sched_deps_info->generate_spec_deps
2666 				? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2667 
2668 		pending = XEXP (pending, 1);
2669 		pending_mem = XEXP (pending_mem, 1);
2670 	      }
2671 
2672 	    for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2673 	      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2674 
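	    /* A load that may trap must not be hoisted above a preceding
	       jump; with control speculation enabled this becomes a weak
	       speculative dependence rather than a hard one.  */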
2675 	    for (u = deps->pending_jump_insns; u; u = XEXP (u, 1))
2676 	      if (deps_may_trap_p (x))
2677 		{
2678 		  if ((sched_deps_info->generate_spec_deps)
2679 		      && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2680 		    {
2681 		      ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2682 					      MAX_DEP_WEAK);
2683 
2684 		      note_dep (XEXP (u, 0), ds);
2685 		    }
2686 		  else
2687 		    add_dependence (insn, XEXP (u, 0), REG_DEP_CONTROL);
2688 		}
2689 	  }
2690 
2691 	/* Always add these dependencies to pending_reads, since
2692 	   this insn may be followed by a write.  */
2693         if (!deps->readonly)
2694           add_insn_mem_dependence (deps, true, insn, x);
2695 
2696 	sched_analyze_2 (deps, XEXP (x, 0), insn);
2697 
2698 	if (cslr_p && sched_deps_info->finish_rhs)
2699 	  sched_deps_info->finish_rhs ();
2700 
2701 	return;
2702       }
2703 
2704     /* Force pending stores to memory in case a trap handler needs them.  */
2705     case TRAP_IF:
2706       flush_pending_lists (deps, insn, true, false);
2707       break;
2708 
2709     case PREFETCH:
2710       if (PREFETCH_SCHEDULE_BARRIER_P (x))
2711 	reg_pending_barrier = TRUE_BARRIER;
2712       /* A prefetch insn contains only an address, so if the prefetch
2713 	 address has no registers, there will be no dependencies on
2714 	 the prefetch insn.  That is wrong from a correctness point of
2715 	 view, because such a prefetch can then be moved below a jump
2716 	 insn, which usually generates a MOVE_BARRIER preventing insns
2717 	 that contain registers or memories from moving through the
2718 	 barrier.  It is also wrong from a performance point of view,
2719 	 since a prefetch without dependencies tends to be issued
2720 	 later instead of earlier.  It is hard to generate accurate
2721 	 dependencies for prefetch insns, as a prefetch has only the
2722 	 start address, but it is better to have something than
2723 	 nothing.  */
2724       if (!deps->readonly)
2725 	{
2726 	  rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2727 	  if (sched_deps_info->use_cselib)
2728 	    cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2729 	  add_insn_mem_dependence (deps, true, insn, x);
2730 	}
2731       break;
2732 
2733     case UNSPEC_VOLATILE:
2734       flush_pending_lists (deps, insn, true, true);
2735       /* FALLTHRU */
2736 
2737     case ASM_OPERANDS:
2738     case ASM_INPUT:
2739       {
2740 	/* Traditional and volatile asm instructions must be considered to use
2741 	   and clobber all hard registers, all pseudo-registers and all of
2742 	   memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.
2743 
2744 	   Consider for instance a volatile asm that changes the fpu rounding
2745 	   mode.  An insn should not be moved across this even if it only uses
2746 	   pseudo-regs because it might give an incorrectly rounded result.  */
2747 	if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2748 	    && !DEBUG_INSN_P (insn))
2749 	  reg_pending_barrier = TRUE_BARRIER;
2750 
2751 	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
2752 	   We cannot just fall through here since then we would be confused
2753 	   by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2754 	   a traditional asm, unlike its normal usage.  */
2755 
2756 	if (code == ASM_OPERANDS)
2757 	  {
2758 	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2759 	      sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2760 
2761 	    if (cslr_p && sched_deps_info->finish_rhs)
2762 	      sched_deps_info->finish_rhs ();
2763 
2764 	    return;
2765 	  }
2766 	break;
2767       }
2768 
2769     case PRE_DEC:
2770     case POST_DEC:
2771     case PRE_INC:
2772     case POST_INC:
2773       /* These both read and modify the result.  We must handle them as writes
2774          to get proper dependencies for following instructions.  We must handle
2775          them as reads to get proper dependencies from this to previous
2776          instructions.  Thus we need to pass them to both sched_analyze_1
2777          and sched_analyze_2.  We must call sched_analyze_2 first in order
2778          to get the proper antecedent for the read.  */
2779       sched_analyze_2 (deps, XEXP (x, 0), insn);
2780       sched_analyze_1 (deps, x, insn);
2781 
2782       if (cslr_p && sched_deps_info->finish_rhs)
2783 	sched_deps_info->finish_rhs ();
2784 
2785       return;
2786 
2787     case POST_MODIFY:
2788     case PRE_MODIFY:
2789       /* op0 = op0 + op1 */
2790       sched_analyze_2 (deps, XEXP (x, 0), insn);
2791       sched_analyze_2 (deps, XEXP (x, 1), insn);
2792       sched_analyze_1 (deps, x, insn);
2793 
2794       if (cslr_p && sched_deps_info->finish_rhs)
2795 	sched_deps_info->finish_rhs ();
2796 
2797       return;
2798 
2799     default:
2800       break;
2801     }
2802 
2803   /* Other cases: walk the insn.  */
2804   fmt = GET_RTX_FORMAT (code);
2805   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2806     {
2807       if (fmt[i] == 'e')
2808 	sched_analyze_2 (deps, XEXP (x, i), insn);
2809       else if (fmt[i] == 'E')
2810 	for (j = 0; j < XVECLEN (x, i); j++)
2811 	  sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2812     }
2813 
2814   if (cslr_p && sched_deps_info->finish_rhs)
2815     sched_deps_info->finish_rhs ();
2816 }
2817 
2818 /* Analyze an INSN with pattern X to find all dependencies.  */
2819 static void
2820 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
2821 {
2822   RTX_CODE code = GET_CODE (x);
2823   rtx link;
2824   unsigned i;
2825   reg_set_iterator rsi;
2826 
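  /* Before reload, also collect the hard registers that INSN's
     constraint alternatives may implicitly set, so that they are
     treated like explicit clobbers below.  */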
2827   if (! reload_completed)
2828     {
2829       HARD_REG_SET temp;
2830 
2831       extract_insn (insn);
2832       preprocess_constraints ();
2833       ira_implicitly_set_insn_hard_regs (&temp);
2834       AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2835       IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2836     }
2837 
2838   can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2839 			 && code == SET);
2840 
2841   if (may_trap_p (x))
2842     /* Avoid moving trapping instructions across function calls that might
2843        not always return.  */
2844     add_dependence_list (insn, deps->last_function_call_may_noreturn,
2845 			 1, REG_DEP_ANTI, true);
2846 
2847   /* We must avoid creating a situation in which two successors of the
2848      current block have different unwind info after scheduling.  If at any
2849      point the two paths re-join this leads to incorrect unwind info.  */
2850   /* ??? There are certain situations involving a forced frame pointer in
2851      which, with extra effort, we could fix up the unwind info at a later
2852      CFG join.  However, it seems better to notice these cases earlier
2853      during prologue generation and avoid marking the frame pointer setup
2854      as frame-related at all.  */
2855   if (RTX_FRAME_RELATED_P (insn))
2856     {
2857       /* Make sure prologue insn is scheduled before next jump.  */
2858       deps->sched_before_next_jump
2859 	= alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2860 
2861       /* Make sure epilogue insn is scheduled after preceding jumps.  */
2862       add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2863 			   true);
2864     }
2865 
2866   if (code == COND_EXEC)
2867     {
2868       sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2869 
2870       /* ??? Should be recording conditions so we reduce the number of
2871 	 false dependencies.  */
2872       x = COND_EXEC_CODE (x);
2873       code = GET_CODE (x);
2874     }
2875   if (code == SET || code == CLOBBER)
2876     {
2877       sched_analyze_1 (deps, x, insn);
2878 
2879       /* Bare clobber insns are used for letting life analysis, reg-stack
2880 	 and others know that a value is dead.  Depend on the last call
2881 	 instruction so that reg-stack won't get confused.  */
2882       if (code == CLOBBER)
2883 	add_dependence_list (insn, deps->last_function_call, 1,
2884 			     REG_DEP_OUTPUT, true);
2885     }
2886   else if (code == PARALLEL)
2887     {
2888       for (i = XVECLEN (x, 0); i--;)
2889 	{
2890 	  rtx sub = XVECEXP (x, 0, i);
2891 	  code = GET_CODE (sub);
2892 
2893 	  if (code == COND_EXEC)
2894 	    {
2895 	      sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2896 	      sub = COND_EXEC_CODE (sub);
2897 	      code = GET_CODE (sub);
2898 	    }
2899 	  if (code == SET || code == CLOBBER)
2900 	    sched_analyze_1 (deps, sub, insn);
2901 	  else
2902 	    sched_analyze_2 (deps, sub, insn);
2903 	}
2904     }
2905   else
2906     sched_analyze_2 (deps, x, insn);
2907 
2908   /* Mark registers CLOBBERED or used by called function.  */
2909   if (CALL_P (insn))
2910     {
2911       for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2912 	{
2913 	  if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2914 	    sched_analyze_1 (deps, XEXP (link, 0), insn);
2915 	  else if (GET_CODE (XEXP (link, 0)) != SET)
2916 	    sched_analyze_2 (deps, XEXP (link, 0), insn);
2917 	}
2918       /* Don't schedule anything after a tail call; a tail call needs
2919 	 to use at least all call-saved registers.  */
2920       if (SIBLING_CALL_P (insn))
2921 	reg_pending_barrier = TRUE_BARRIER;
2922       else if (find_reg_note (insn, REG_SETJMP, NULL))
2923 	reg_pending_barrier = MOVE_BARRIER;
2924     }
2925 
2926   if (JUMP_P (insn))
2927     {
2928       rtx next;
2929       next = next_nonnote_nondebug_insn (insn);
2930       if (next && BARRIER_P (next))
2931 	reg_pending_barrier = MOVE_BARRIER;
2932       else
2933 	{
2934 	  rtx pending, pending_mem;
2935 
2936           if (sched_deps_info->compute_jump_reg_dependencies)
2937             {
2938               (*sched_deps_info->compute_jump_reg_dependencies)
2939 		(insn, reg_pending_control_uses);
2940 
2941               /* Make latency of jump equal to 0 by using anti-dependence.  */
2942               EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
2943                 {
2944                   struct deps_reg *reg_last = &deps->reg_last[i];
2945                   add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
2946 				       false);
2947                   add_dependence_list (insn, reg_last->implicit_sets,
2948 				       0, REG_DEP_ANTI, false);
2949                   add_dependence_list (insn, reg_last->clobbers, 0,
2950 				       REG_DEP_ANTI, false);
2951                 }
2952             }
2953 
2954 	  /* All memory writes and volatile reads must happen before the
2955 	     jump.  Non-volatile reads must happen before the jump iff the
2956 	     result is needed by the register-use mask computed above.  */
2957 
2958 	  pending = deps->pending_write_insns;
2959 	  pending_mem = deps->pending_write_mems;
2960 	  while (pending)
2961 	    {
2962 	      if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2963 		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2964 	      pending = XEXP (pending, 1);
2965 	      pending_mem = XEXP (pending_mem, 1);
2966 	    }
2967 
2968 	  pending = deps->pending_read_insns;
2969 	  pending_mem = deps->pending_read_mems;
2970 	  while (pending)
2971 	    {
2972 	      if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
2973 		  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2974 		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2975 	      pending = XEXP (pending, 1);
2976 	      pending_mem = XEXP (pending_mem, 1);
2977 	    }
2978 
2979 	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2980 			       REG_DEP_ANTI, true);
2981 	  add_dependence_list (insn, deps->pending_jump_insns, 1,
2982 			       REG_DEP_ANTI, true);
2983 	}
2984     }
2985 
2986   /* If this instruction can throw an exception, then moving it changes
2987      where block boundaries fall.  This is mighty confusing elsewhere.
2988      Therefore, prevent such an instruction from being moved.  Same for
2989      non-jump instructions that define block boundaries.
2990      ??? Unclear whether this is still necessary in EBB mode.  If not,
2991      add_branch_dependences should be adjusted for RGN mode instead.  */
2992   if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
2993       || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
2994     reg_pending_barrier = MOVE_BARRIER;
2995 
2996   if (sched_pressure != SCHED_PRESSURE_NONE)
2997     {
2998       setup_insn_reg_uses (deps, insn);
2999       init_insn_reg_pressure_info (insn);
3000     }
3001 
3002   /* Add register dependencies for insn.  */
3003   if (DEBUG_INSN_P (insn))
3004     {
3005       rtx prev = deps->last_debug_insn;
3006       rtx u;
3007 
3008       if (!deps->readonly)
3009 	deps->last_debug_insn = insn;
3010 
3011       if (prev)
3012 	add_dependence (insn, prev, REG_DEP_ANTI);
3013 
3014       add_dependence_list (insn, deps->last_function_call, 1,
3015 			   REG_DEP_ANTI, false);
3016 
3017       if (!sel_sched_p ())
3018 	for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
3019 	  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3020 
3021       EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3022 	{
3023 	  struct deps_reg *reg_last = &deps->reg_last[i];
3024 	  add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
3025 	  /* There's no point in making REG_DEP_CONTROL dependencies for
3026 	     debug insns.  */
3027 	  add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3028 			       false);
3029 
3030 	  if (!deps->readonly)
3031 	    reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3032 	}
3033       CLEAR_REG_SET (reg_pending_uses);
3034 
3035       /* Quite often, a debug insn will refer to stuff in the
3036 	 previous instruction, but the reason we want this
3037 	 dependency here is to make sure the scheduler doesn't
3038 	 gratuitously move a debug insn ahead.  This could dirty
3039 	 DF flags and cause additional analysis that wouldn't have
3040 	 occurred in compilation without debug insns, and such
3041 	 additional analysis can modify the generated code.  */
3042       prev = PREV_INSN (insn);
3043 
3044       if (prev && NONDEBUG_INSN_P (prev))
3045 	add_dependence (insn, prev, REG_DEP_ANTI);
3046     }
3047   else
3048     {
3049       regset_head set_or_clobbered;
3050 
3051       EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3052 	{
3053 	  struct deps_reg *reg_last = &deps->reg_last[i];
3054 	  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3055 	  add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3056 			       false);
3057 	  add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3058 			       false);
3059 
3060 	  if (!deps->readonly)
3061 	    {
3062 	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3063 	      reg_last->uses_length++;
3064 	    }
3065 	}
3066 
3067       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3068 	if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3069 	  {
3070 	    struct deps_reg *reg_last = &deps->reg_last[i];
3071 	    add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3072 	    add_dependence_list (insn, reg_last->implicit_sets, 0,
3073 				 REG_DEP_ANTI, false);
3074 	    add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3075 				 false);
3076 
3077 	    if (!deps->readonly)
3078 	      {
3079 		reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3080 		reg_last->uses_length++;
3081 	      }
3082 	  }
3083 
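      /* On targets with an exposed pipeline, invalidate the cached
	 condition of any earlier insn whose condition mentions a
	 register that is set or clobbered here.  */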
3084       if (targetm.sched.exposed_pipeline)
3085 	{
3086 	  INIT_REG_SET (&set_or_clobbered);
3087 	  bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3088 		      reg_pending_sets);
3089 	  EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3090 	    {
3091 	      struct deps_reg *reg_last = &deps->reg_last[i];
3092 	      rtx list;
3093 	      for (list = reg_last->uses; list; list = XEXP (list, 1))
3094 		{
3095 		  rtx other = XEXP (list, 0);
3096 		  if (INSN_CACHED_COND (other) != const_true_rtx
3097 		      && refers_to_regno_p (i, i + 1, INSN_CACHED_COND (other), NULL))
3098 		    INSN_CACHED_COND (other) = const_true_rtx;
3099 		}
3100 	    }
3101 	}
3102 
3103       /* If the current insn is conditional, we can't free any
3104 	 of the lists.  */
3105       if (sched_has_condition_p (insn))
3106 	{
3107 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3108 	    {
3109 	      struct deps_reg *reg_last = &deps->reg_last[i];
3110 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3111 				   false);
3112 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
3113 				   REG_DEP_ANTI, false);
3114 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3115 				   false);
3116 	      add_dependence_list (insn, reg_last->control_uses, 0,
3117 				   REG_DEP_CONTROL, false);
3118 
3119 	      if (!deps->readonly)
3120 		{
3121 		  reg_last->clobbers
3122 		    = alloc_INSN_LIST (insn, reg_last->clobbers);
3123 		  reg_last->clobbers_length++;
3124 		}
3125 	    }
3126 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3127 	    {
3128 	      struct deps_reg *reg_last = &deps->reg_last[i];
3129 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3130 				   false);
3131 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
3132 				   REG_DEP_ANTI, false);
3133 	      add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3134 				   false);
3135 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3136 				   false);
3137 	      add_dependence_list (insn, reg_last->control_uses, 0,
3138 				   REG_DEP_CONTROL, false);
3139 
3140 	      if (!deps->readonly)
3141 		reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3142 	    }
3143 	}
3144       else
3145 	{
3146 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3147 	    {
3148 	      struct deps_reg *reg_last = &deps->reg_last[i];
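	      /* If the use or clobber lists for this register have grown
		 too long, turn them into dependencies now and record INSN
		 as if it were a set, so that later insns need only depend
		 on INSN; this keeps the lists bounded.  */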
3149 	      if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
3150 		  || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
3151 		{
3152 		  add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3153 						REG_DEP_OUTPUT, false);
3154 		  add_dependence_list_and_free (deps, insn,
3155 						&reg_last->implicit_sets, 0,
3156 						REG_DEP_ANTI, false);
3157 		  add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3158 						REG_DEP_ANTI, false);
3159 		  add_dependence_list_and_free (deps, insn,
3160 						&reg_last->control_uses, 0,
3161 						REG_DEP_ANTI, false);
3162 		  add_dependence_list_and_free (deps, insn,
3163 						&reg_last->clobbers, 0,
3164 						REG_DEP_OUTPUT, false);
3165 
3166 		  if (!deps->readonly)
3167 		    {
3168 		      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3169 		      reg_last->clobbers_length = 0;
3170 		      reg_last->uses_length = 0;
3171 		    }
3172 		}
3173 	      else
3174 		{
3175 		  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3176 				       false);
3177 		  add_dependence_list (insn, reg_last->implicit_sets, 0,
3178 				       REG_DEP_ANTI, false);
3179 		  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3180 				       false);
3181 		  add_dependence_list (insn, reg_last->control_uses, 0,
3182 				       REG_DEP_CONTROL, false);
3183 		}
3184 
3185 	      if (!deps->readonly)
3186 		{
3187 		  reg_last->clobbers_length++;
3188 		  reg_last->clobbers
3189 		    = alloc_INSN_LIST (insn, reg_last->clobbers);
3190 		}
3191 	    }
3192 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3193 	    {
3194 	      struct deps_reg *reg_last = &deps->reg_last[i];
3195 
3196 	      add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3197 					    REG_DEP_OUTPUT, false);
3198 	      add_dependence_list_and_free (deps, insn,
3199 					    &reg_last->implicit_sets,
3200 					    0, REG_DEP_ANTI, false);
3201 	      add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3202 					    REG_DEP_OUTPUT, false);
3203 	      add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3204 					    REG_DEP_ANTI, false);
3205 	      add_dependence_list (insn, reg_last->control_uses, 0,
3206 				   REG_DEP_CONTROL, false);
3207 
3208 	      if (!deps->readonly)
3209 		{
3210 		  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3211 		  reg_last->uses_length = 0;
3212 		  reg_last->clobbers_length = 0;
3213 		}
3214 	    }
3215 	}
3216       if (!deps->readonly)
3217 	{
3218 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3219 	    {
3220 	      struct deps_reg *reg_last = &deps->reg_last[i];
3221 	      reg_last->control_uses
3222 		= alloc_INSN_LIST (insn, reg_last->control_uses);
3223 	    }
3224 	}
3225     }
3226 
3227   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3228     if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3229       {
3230 	struct deps_reg *reg_last = &deps->reg_last[i];
3231 	add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3232 	add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3233 	add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3234 	add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3235 			     false);
3236 
3237 	if (!deps->readonly)
3238 	  reg_last->implicit_sets
3239 	    = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3240       }
3241 
3242   if (!deps->readonly)
3243     {
3244       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3245       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3246       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3247       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3248 	if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3249 	    || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3250 	  SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3251 
3252       /* Set up the pending barrier found.  */
3253       deps->last_reg_pending_barrier = reg_pending_barrier;
3254     }
3255 
3256   CLEAR_REG_SET (reg_pending_uses);
3257   CLEAR_REG_SET (reg_pending_clobbers);
3258   CLEAR_REG_SET (reg_pending_sets);
3259   CLEAR_REG_SET (reg_pending_control_uses);
3260   CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3261   CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3262 
3263   /* Add dependencies if a scheduling barrier was found.  */
3264   if (reg_pending_barrier)
3265     {
3266       /* In the case of a barrier, most of the added dependencies are not
3267          real, so we use anti-dependences here.  */
3268       if (sched_has_condition_p (insn))
3269 	{
3270 	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3271 	    {
3272 	      struct deps_reg *reg_last = &deps->reg_last[i];
3273 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3274 				   true);
3275 	      add_dependence_list (insn, reg_last->sets, 0,
3276 				   reg_pending_barrier == TRUE_BARRIER
3277 				   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3278 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
3279 				   REG_DEP_ANTI, true);
3280 	      add_dependence_list (insn, reg_last->clobbers, 0,
3281 				   reg_pending_barrier == TRUE_BARRIER
3282 				   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3283 	    }
3284 	}
3285       else
3286 	{
3287 	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3288 	    {
3289 	      struct deps_reg *reg_last = &deps->reg_last[i];
3290 	      add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3291 					    REG_DEP_ANTI, true);
3292 	      add_dependence_list_and_free (deps, insn,
3293 					    &reg_last->control_uses, 0,
3294 					    REG_DEP_CONTROL, true);
3295 	      add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3296 					    reg_pending_barrier == TRUE_BARRIER
3297 					    ? REG_DEP_TRUE : REG_DEP_ANTI,
3298 					    true);
3299 	      add_dependence_list_and_free (deps, insn,
3300 					    &reg_last->implicit_sets, 0,
3301 					    REG_DEP_ANTI, true);
3302 	      add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3303 					    reg_pending_barrier == TRUE_BARRIER
3304 					    ? REG_DEP_TRUE : REG_DEP_ANTI,
3305 					    true);
3306 
3307               if (!deps->readonly)
3308                 {
3309                   reg_last->uses_length = 0;
3310                   reg_last->clobbers_length = 0;
3311                 }
3312 	    }
3313 	}
3314 
3315       if (!deps->readonly)
3316         for (i = 0; i < (unsigned)deps->max_reg; i++)
3317           {
3318             struct deps_reg *reg_last = &deps->reg_last[i];
3319             reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3320             SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3321           }
3322 
3323       /* Don't flush pending lists on speculative checks for
3324 	 selective scheduling.  */
3325       if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
3326 	flush_pending_lists (deps, insn, true, true);
3327 
3328       reg_pending_barrier = NOT_A_BARRIER;
3329     }
3330 
3331   /* If a post-call group is still open, see if it should remain so.
3332      This insn must be a simple move of a hard reg to a pseudo or
3333      vice-versa.
3334 
3335      We must avoid moving these insns for correctness on targets
3336      with small register classes, and for special registers like
3337      PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
3338      hard regs for all targets.  */
3339 
3340   if (deps->in_post_call_group_p)
3341     {
3342       rtx tmp, set = single_set (insn);
3343       int src_regno, dest_regno;
3344 
3345       if (set == NULL)
3346 	{
3347 	  if (DEBUG_INSN_P (insn))
3348 	    /* We don't want to mark debug insns as part of the same
3349 	       sched group.  We know they really aren't, but if we use
3350 	       debug insns to tell that a call group is over, we'll
3351 	       get different code if debug insns are not there and
3352 	       instructions that follow seem like they should be part
3353 	       of the call group.
3354 
3355 	       Also, if we did, chain_to_prev_insn would move the
3356 	       deps of the debug insn to the call insn, modifying
3357 	       non-debug post-dependency counts of the debug insn
3358 	       dependencies and otherwise messing with the scheduling
3359 	       order.
3360 
3361 	       Instead, let such debug insns be scheduled freely, but
3362 	       keep the call group open in case there are insns that
3363 	       should be part of it afterwards.  Since we grant debug
3364 	       insns higher priority than even sched group insns, it
3365 	       will all turn out all right.  */
3366 	    goto debug_dont_end_call_group;
3367 	  else
3368 	    goto end_call_group;
3369 	}
3370 
3371       tmp = SET_DEST (set);
3372       if (GET_CODE (tmp) == SUBREG)
3373 	tmp = SUBREG_REG (tmp);
3374       if (REG_P (tmp))
3375 	dest_regno = REGNO (tmp);
3376       else
3377 	goto end_call_group;
3378 
3379       tmp = SET_SRC (set);
3380       if (GET_CODE (tmp) == SUBREG)
3381 	tmp = SUBREG_REG (tmp);
3382       if ((GET_CODE (tmp) == PLUS
3383 	   || GET_CODE (tmp) == MINUS)
3384 	  && REG_P (XEXP (tmp, 0))
3385 	  && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3386 	  && dest_regno == STACK_POINTER_REGNUM)
3387 	src_regno = STACK_POINTER_REGNUM;
3388       else if (REG_P (tmp))
3389 	src_regno = REGNO (tmp);
3390       else
3391 	goto end_call_group;
3392 
3393       if (src_regno < FIRST_PSEUDO_REGISTER
3394 	  || dest_regno < FIRST_PSEUDO_REGISTER)
3395 	{
3396 	  if (!deps->readonly
3397               && deps->in_post_call_group_p == post_call_initial)
3398 	    deps->in_post_call_group_p = post_call;
3399 
3400           if (!sel_sched_p () || sched_emulate_haifa_p)
3401             {
3402               SCHED_GROUP_P (insn) = 1;
3403               CANT_MOVE (insn) = 1;
3404             }
3405 	}
3406       else
3407 	{
3408 	end_call_group:
3409           if (!deps->readonly)
3410             deps->in_post_call_group_p = not_post_call;
3411 	}
3412     }
3413 
3414  debug_dont_end_call_group:
3415   if ((current_sched_info->flags & DO_SPECULATION)
3416       && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3417     /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3418        be speculated.  */
3419     {
3420       if (sel_sched_p ())
3421         sel_mark_hard_insn (insn);
3422       else
3423         {
3424           sd_iterator_def sd_it;
3425           dep_t dep;
3426 
3427           for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3428                sd_iterator_cond (&sd_it, &dep);)
3429             change_spec_dep_to_hard (sd_it);
3430         }
3431     }
3432 }
3433 
3434 /* Return TRUE if INSN might not always return normally (e.g. a call to
3435    exit or longjmp, or a call that loops forever).  */
3436 /* FIXME: Why can't this function just use flags_from_decl_or_type and
3437    test for ECF_NORETURN?  */
3438 static bool
3439 call_may_noreturn_p (rtx insn)
3440 {
3441   rtx call;
3442 
3443   /* const or pure calls that aren't looping will always return.  */
3444   if (RTL_CONST_OR_PURE_CALL_P (insn)
3445       && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3446     return false;
3447 
3448   call = get_call_rtx_from (insn);
3449   if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3450     {
3451       rtx symbol = XEXP (XEXP (call, 0), 0);
3452       if (SYMBOL_REF_DECL (symbol)
3453 	  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3454 	{
3455 	  if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3456 	      == BUILT_IN_NORMAL)
3457 	    switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3458 	      {
3459 	      case BUILT_IN_BCMP:
3460 	      case BUILT_IN_BCOPY:
3461 	      case BUILT_IN_BZERO:
3462 	      case BUILT_IN_INDEX:
3463 	      case BUILT_IN_MEMCHR:
3464 	      case BUILT_IN_MEMCMP:
3465 	      case BUILT_IN_MEMCPY:
3466 	      case BUILT_IN_MEMMOVE:
3467 	      case BUILT_IN_MEMPCPY:
3468 	      case BUILT_IN_MEMSET:
3469 	      case BUILT_IN_RINDEX:
3470 	      case BUILT_IN_STPCPY:
3471 	      case BUILT_IN_STPNCPY:
3472 	      case BUILT_IN_STRCAT:
3473 	      case BUILT_IN_STRCHR:
3474 	      case BUILT_IN_STRCMP:
3475 	      case BUILT_IN_STRCPY:
3476 	      case BUILT_IN_STRCSPN:
3477 	      case BUILT_IN_STRLEN:
3478 	      case BUILT_IN_STRNCAT:
3479 	      case BUILT_IN_STRNCMP:
3480 	      case BUILT_IN_STRNCPY:
3481 	      case BUILT_IN_STRPBRK:
3482 	      case BUILT_IN_STRRCHR:
3483 	      case BUILT_IN_STRSPN:
3484 	      case BUILT_IN_STRSTR:
3485 		/* Assume certain string/memory builtins always return.  */
3486 		return false;
3487 	      default:
3488 		break;
3489 	      }
3490 	}
3491     }
3492 
3493   /* For all other calls assume that they might not always return.  */
3494   return true;
3495 }
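
/* For example (an illustrative note, not extra logic): a call whose address
   is a SYMBOL_REF for the normal builtin memcpy is matched by the switch
   above and assumed to always return, while an indirect call through a
   function pointer has no SYMBOL_REF address and falls through to the final
   "return true"; deps_analyze_insn then records such a call in
   deps->last_function_call_may_noreturn.  */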
3496 
3497 /* Return true if INSN should be made dependent on the previous instruction
3498    group, and if all INSN's dependencies should be moved to the first
3499    instruction of that group.  */
3500 
3501 static bool
3502 chain_to_prev_insn_p (rtx insn)
3503 {
3504   rtx prev, x;
3505 
3506   /* INSN forms a group with the previous instruction.  */
3507   if (SCHED_GROUP_P (insn))
3508     return true;
3509 
3510   /* If the previous instruction clobbers a register R and this one sets
3511      part of R, the clobber was added specifically to help us track the
3512      liveness of R.  There's no point scheduling the clobber and leaving
3513      INSN behind, especially if we move the clobber to another block.  */
3514   prev = prev_nonnote_nondebug_insn (insn);
3515   if (prev
3516       && INSN_P (prev)
3517       && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
3518       && GET_CODE (PATTERN (prev)) == CLOBBER)
3519     {
3520       x = XEXP (PATTERN (prev), 0);
3521       if (set_of (x, insn))
3522 	return true;
3523     }
3524 
3525   return false;
3526 }
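
/* An illustrative instance of the second case (hypothetical RTL): a
   multi-word value built piecewise often looks like

       (clobber (reg:DI 100))
       (set (subreg:SI (reg:DI 100) 0) ...)

   For the SET that directly follows the CLOBBER, set_of sees that the SET
   stores into part of (reg:DI 100), so the function returns true and the
   SET stays chained to its CLOBBER.  */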
3527 
3528 /* Analyze INSN with DEPS as a context.  */
3529 void
3530 deps_analyze_insn (struct deps_desc *deps, rtx insn)
3531 {
3532   if (sched_deps_info->start_insn)
3533     sched_deps_info->start_insn (insn);
3534 
3535   /* Record the condition for this insn.  */
3536   if (NONDEBUG_INSN_P (insn))
3537     {
3538       rtx t;
3539       sched_get_condition_with_rev (insn, NULL);
3540       t = INSN_CACHED_COND (insn);
3541       INSN_COND_DEPS (insn) = NULL_RTX;
3542       if (reload_completed
3543 	  && (current_sched_info->flags & DO_PREDICATION)
3544 	  && COMPARISON_P (t)
3545 	  && REG_P (XEXP (t, 0))
3546 	  && CONSTANT_P (XEXP (t, 1)))
3547 	{
3548 	  unsigned int regno;
3549 	  int nregs;
3550 	  t = XEXP (t, 0);
3551 	  regno = REGNO (t);
3552 	  nregs = hard_regno_nregs[regno][GET_MODE (t)];
3553 	  t = NULL_RTX;
3554 	  while (nregs-- > 0)
3555 	    {
3556 	      struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
3557 	      t = concat_INSN_LIST (reg_last->sets, t);
3558 	      t = concat_INSN_LIST (reg_last->clobbers, t);
3559 	      t = concat_INSN_LIST (reg_last->implicit_sets, t);
3560 	    }
3561 	  INSN_COND_DEPS (insn) = t;
3562 	}
3563     }
3564 
3565   if (JUMP_P (insn))
3566     {
3567       /* Make each JUMP_INSN (but not a speculative check)
3568          a scheduling barrier for memory references.  */
3569       if (!deps->readonly
3570           && !(sel_sched_p ()
3571                && sel_insn_is_speculation_check (insn)))
3572         {
3573           /* Keep the list a reasonable size.  */
3574           if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
3575             flush_pending_lists (deps, insn, true, true);
3576           else
3577 	    deps->pending_jump_insns
3578               = alloc_INSN_LIST (insn, deps->pending_jump_insns);
3579         }
3580 
3581       /* For each insn which shouldn't cross a jump, add a dependence.  */
3582       add_dependence_list_and_free (deps, insn,
3583 				    &deps->sched_before_next_jump, 1,
3584 				    REG_DEP_ANTI, true);
3585 
3586       sched_analyze_insn (deps, PATTERN (insn), insn);
3587     }
3588   else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
3589     {
3590       sched_analyze_insn (deps, PATTERN (insn), insn);
3591     }
3592   else if (CALL_P (insn))
3593     {
3594       int i;
3595 
3596       CANT_MOVE (insn) = 1;
3597 
3598       if (find_reg_note (insn, REG_SETJMP, NULL))
3599         {
3600           /* This is setjmp.  Assume that all registers, not just
3601              hard registers, may be clobbered by this call.  */
3602           reg_pending_barrier = MOVE_BARRIER;
3603         }
3604       else
3605         {
3606           for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3607             /* A call may read and modify global register variables.  */
3608             if (global_regs[i])
3609               {
3610                 SET_REGNO_REG_SET (reg_pending_sets, i);
3611                 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3612               }
3613           /* Other call-clobbered hard regs may be clobbered.
3614              Since we only have a choice between 'might be clobbered'
3615              and 'definitely not clobbered', we must include all
3616              partly call-clobbered registers here.  */
3617             else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3618                      || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3619               SET_REGNO_REG_SET (reg_pending_clobbers, i);
3620           /* We don't know which fixed registers the function might use,
3621              but the stack pointer is certainly among them; be conservative
3622              and mark every fixed register as implicitly used.  */
3623             else if (fixed_regs[i])
3624 	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3625           /* The frame pointer is normally not used by the function
3626              itself, but by the debugger.  */
3627           /* ??? MIPS o32 is an exception.  It uses the frame pointer
3628              in the macro expansion of jal but does not represent this
3629              fact in the call_insn rtl.  */
3630             else if (i == FRAME_POINTER_REGNUM
3631                      || (i == HARD_FRAME_POINTER_REGNUM
3632                          && (! reload_completed || frame_pointer_needed)))
3633 	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3634         }
3635 
3636       /* For each insn which shouldn't cross a call, add a dependence
3637          between that insn and this call insn.  */
3638       add_dependence_list_and_free (deps, insn,
3639                                     &deps->sched_before_next_call, 1,
3640                                     REG_DEP_ANTI, true);
3641 
3642       sched_analyze_insn (deps, PATTERN (insn), insn);
3643 
3644       /* If CALL would be in a sched group, then this would violate the
3645 	 convention that sched group insns have dependencies only on the
3646 	 previous instruction.
3647 
3648 	 Of course one can say: "Hey!  What about the head of the sched group?"
3649 	 And I will answer: "Basic principles (one dep per insn) are always
3650 	 the same."  */
3651       gcc_assert (!SCHED_GROUP_P (insn));
3652 
3653       /* In the absence of interprocedural alias analysis, we must flush
3654          all pending reads and writes, and start new dependencies starting
3655          from here.  But only flush writes for constant calls (which may
3656          be passed a pointer to something we haven't written yet).  */
3657       flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3658 
3659       if (!deps->readonly)
3660         {
3661           /* Remember the last function call for limiting lifetimes.  */
3662           free_INSN_LIST_list (&deps->last_function_call);
3663           deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3664 
3665 	  if (call_may_noreturn_p (insn))
3666 	    {
3667 	      /* Remember the last function call that might not always return
3668 		 normally for limiting moves of trapping insns.  */
3669 	      free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3670 	      deps->last_function_call_may_noreturn
3671 		= alloc_INSN_LIST (insn, NULL_RTX);
3672 	    }
3673 
3674           /* Before reload, begin a post-call group, so as to keep the
3675              lifetimes of hard registers correct.  */
3676           if (! reload_completed)
3677             deps->in_post_call_group_p = post_call;
3678         }
3679     }
3680 
3681   if (sched_deps_info->use_cselib)
3682     cselib_process_insn (insn);
3683 
3684   /* EH_REGION insn notes can not appear until well after we complete
3685      scheduling.  */
3686   if (NOTE_P (insn))
3687     gcc_assert (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
3688 		&& NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END);
3689 
3690   if (sched_deps_info->finish_insn)
3691     sched_deps_info->finish_insn ();
3692 
3693   /* Fixup the dependencies in the sched group.  */
3694   if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3695       && chain_to_prev_insn_p (insn)
3696       && !sel_sched_p ())
3697     chain_to_prev_insn (insn);
3698 }
3699 
3700 /* Initialize DEPS for the new block beginning with HEAD.  */
3701 void
3702 deps_start_bb (struct deps_desc *deps, rtx head)
3703 {
3704   gcc_assert (!deps->readonly);
3705 
3706   /* Before reload, if the previous block ended in a call, show that
3707      we are inside a post-call group, so as to keep the lifetimes of
3708      hard registers correct.  */
3709   if (! reload_completed && !LABEL_P (head))
3710     {
3711       rtx insn = prev_nonnote_nondebug_insn (head);
3712 
3713       if (insn && CALL_P (insn))
3714 	deps->in_post_call_group_p = post_call_initial;
3715     }
3716 }
3717 
3718 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3719    dependencies for each insn.  */
3720 void
3721 sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
3722 {
3723   rtx insn;
3724 
3725   if (sched_deps_info->use_cselib)
3726     cselib_init (CSELIB_RECORD_MEMORY);
3727 
3728   deps_start_bb (deps, head);
3729 
3730   for (insn = head;; insn = NEXT_INSN (insn))
3731     {
3732 
3733       if (INSN_P (insn))
3734 	{
3735 	  /* And initialize deps_lists.  */
3736 	  sd_init_insn (insn);
3737 	}
3738 
3739       deps_analyze_insn (deps, insn);
3740 
3741       if (insn == tail)
3742 	{
3743 	  if (sched_deps_info->use_cselib)
3744 	    cselib_finish ();
3745 	  return;
3746 	}
3747     }
3748   gcc_unreachable ();
3749 }
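
/* A minimal usage sketch (illustrative only; real callers such as the Haifa
   scheduler also set up sched_deps_info, luids and the h_d_i_d vector before
   this point):

       struct deps_desc tmp_deps;

       init_deps (&tmp_deps, false);
       sched_analyze (&tmp_deps, head, tail);   <- build backward deps
       free_deps (&tmp_deps);                   <- per-insn dep lists remain
       ...schedule the insns between head and tail...
       sched_free_deps (head, tail, true);      <- drop the resolved deps

   where HEAD and TAIL delimit one basic block or scheduling region.  */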
3750 
3751 /* Helper for sched_free_deps ().
3752    Delete INSN's (RESOLVED_P) backward dependencies.  */
3753 static void
3754 delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
3755 {
3756   sd_iterator_def sd_it;
3757   dep_t dep;
3758   sd_list_types_def types;
3759 
3760   if (resolved_p)
3761     types = SD_LIST_RES_BACK;
3762   else
3763     types = SD_LIST_BACK;
3764 
3765   for (sd_it = sd_iterator_start (insn, types);
3766        sd_iterator_cond (&sd_it, &dep);)
3767     {
3768       dep_link_t link = *sd_it.linkp;
3769       dep_node_t node = DEP_LINK_NODE (link);
3770       deps_list_t back_list;
3771       deps_list_t forw_list;
3772 
3773       get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3774       remove_from_deps_list (link, back_list);
3775       delete_dep_node (node);
3776     }
3777 }
3778 
3779 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3780    deps_lists.  */
3781 void
3782 sched_free_deps (rtx head, rtx tail, bool resolved_p)
3783 {
3784   rtx insn;
3785   rtx next_tail = NEXT_INSN (tail);
3786 
3787   /* We make two passes since some insns may be scheduled before their
3788      dependencies are resolved.  */
3789   for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3790     if (INSN_P (insn) && INSN_LUID (insn) > 0)
3791       {
3792 	/* Clear forward deps and leave the dep_nodes to the
3793 	   corresponding back_deps list.  */
3794 	if (resolved_p)
3795 	  clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3796 	else
3797 	  clear_deps_list (INSN_FORW_DEPS (insn));
3798       }
3799   for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3800     if (INSN_P (insn) && INSN_LUID (insn) > 0)
3801       {
3802 	/* Clear resolved back deps together with its dep_nodes.  */
3803 	delete_dep_nodes_in_back_deps (insn, resolved_p);
3804 
3805 	sd_finish_insn (insn);
3806       }
3807 }
3808 
3809 /* Initialize variables for region data dependence analysis.
3810    When LAZY_REG_LAST is true, do not allocate reg_last array
3811    of struct deps_desc immediately.  */
3812 
3813 void
3814 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3815 {
3816   int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3817 
3818   deps->max_reg = max_reg;
3819   if (lazy_reg_last)
3820     deps->reg_last = NULL;
3821   else
3822     deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3823   INIT_REG_SET (&deps->reg_last_in_use);
3824 
3825   deps->pending_read_insns = 0;
3826   deps->pending_read_mems = 0;
3827   deps->pending_write_insns = 0;
3828   deps->pending_write_mems = 0;
3829   deps->pending_jump_insns = 0;
3830   deps->pending_read_list_length = 0;
3831   deps->pending_write_list_length = 0;
3832   deps->pending_flush_length = 0;
3833   deps->last_pending_memory_flush = 0;
3834   deps->last_function_call = 0;
3835   deps->last_function_call_may_noreturn = 0;
3836   deps->sched_before_next_call = 0;
3837   deps->sched_before_next_jump = 0;
3838   deps->in_post_call_group_p = not_post_call;
3839   deps->last_debug_insn = 0;
3840   deps->last_reg_pending_barrier = NOT_A_BARRIER;
3841   deps->readonly = 0;
3842 }
3843 
3844 /* Initialize only the reg_last field of DEPS, which was not allocated
3845    before because we initialized DEPS lazily.  */
3846 void
3847 init_deps_reg_last (struct deps_desc *deps)
3848 {
3849   gcc_assert (deps && deps->max_reg > 0);
3850   gcc_assert (deps->reg_last == NULL);
3851 
3852   deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3853 }
3854 
3855 
3856 /* Free insn lists found in DEPS.  */
3857 
3858 void
3859 free_deps (struct deps_desc *deps)
3860 {
3861   unsigned i;
3862   reg_set_iterator rsi;
3863 
3864   /* We set max_reg to 0 when this context was already freed.  */
3865   if (deps->max_reg == 0)
3866     {
3867       gcc_assert (deps->reg_last == NULL);
3868       return;
3869     }
3870   deps->max_reg = 0;
3871 
3872   free_INSN_LIST_list (&deps->pending_read_insns);
3873   free_EXPR_LIST_list (&deps->pending_read_mems);
3874   free_INSN_LIST_list (&deps->pending_write_insns);
3875   free_EXPR_LIST_list (&deps->pending_write_mems);
3876   free_INSN_LIST_list (&deps->last_pending_memory_flush);
3877 
3878   /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3879      times.  For a testcase with 42000 regs and 8000 small basic blocks,
3880      this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
3881   EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3882     {
3883       struct deps_reg *reg_last = &deps->reg_last[i];
3884       if (reg_last->uses)
3885 	free_INSN_LIST_list (&reg_last->uses);
3886       if (reg_last->sets)
3887 	free_INSN_LIST_list (&reg_last->sets);
3888       if (reg_last->implicit_sets)
3889 	free_INSN_LIST_list (&reg_last->implicit_sets);
3890       if (reg_last->control_uses)
3891 	free_INSN_LIST_list (&reg_last->control_uses);
3892       if (reg_last->clobbers)
3893 	free_INSN_LIST_list (&reg_last->clobbers);
3894     }
3895   CLEAR_REG_SET (&deps->reg_last_in_use);
3896 
3897   /* As we initialize reg_last lazily, it is possible that we didn't allocate
3898      it at all.  */
3899   free (deps->reg_last);
3900   deps->reg_last = NULL;
3901 
3902   deps = NULL;
3903 }
3904 
3905 /* Remove INSN from the dependence context DEPS.  */
3906 void
3907 remove_from_deps (struct deps_desc *deps, rtx insn)
3908 {
3909   int removed;
3910   unsigned i;
3911   reg_set_iterator rsi;
3912 
3913   removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3914                                                &deps->pending_read_mems);
3915   if (!DEBUG_INSN_P (insn))
3916     deps->pending_read_list_length -= removed;
3917   removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3918                                                &deps->pending_write_mems);
3919   deps->pending_write_list_length -= removed;
3920 
3921   removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
3922   deps->pending_flush_length -= removed;
3923   removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3924   deps->pending_flush_length -= removed;
3925 
3926   EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3927     {
3928       struct deps_reg *reg_last = &deps->reg_last[i];
3929       if (reg_last->uses)
3930 	remove_from_dependence_list (insn, &reg_last->uses);
3931       if (reg_last->sets)
3932 	remove_from_dependence_list (insn, &reg_last->sets);
3933       if (reg_last->implicit_sets)
3934 	remove_from_dependence_list (insn, &reg_last->implicit_sets);
3935       if (reg_last->clobbers)
3936 	remove_from_dependence_list (insn, &reg_last->clobbers);
3937       if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
3938 	  && !reg_last->clobbers)
3939         CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
3940     }
3941 
3942   if (CALL_P (insn))
3943     {
3944       remove_from_dependence_list (insn, &deps->last_function_call);
3945       remove_from_dependence_list (insn,
3946 				   &deps->last_function_call_may_noreturn);
3947     }
3948   remove_from_dependence_list (insn, &deps->sched_before_next_call);
3949 }
3950 
3951 /* Init deps data vector.  */
3952 static void
3953 init_deps_data_vector (void)
3954 {
3955   int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
3956   if (reserve > 0 && ! h_d_i_d.space (reserve))
3957     h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
3958 }
3959 
3960 /* If it is profitable to use them, initialize or extend (depending on
3961    GLOBAL_P) dependency data.  */
3962 void
3963 sched_deps_init (bool global_p)
3964 {
3965   /* Average number of insns in the basic block.
3966      '+ 1' is used to make it nonzero.  */
3967   int insns_in_block = sched_max_luid / n_basic_blocks + 1;
3968 
3969   init_deps_data_vector ();
3970 
3971   /* We use another caching mechanism for selective scheduling, so
3972      we don't use this one.  */
3973   if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
3974     {
3975       /* ?!? We could save some memory by computing a per-region luid mapping
3976          which could reduce both the number of vectors in the cache and the
3977          size of each vector.  Instead we just avoid the cache entirely unless
3978          the average number of instructions in a basic block is very high.  See
3979          the comment before the declaration of true_dependency_cache for
3980          what we consider "very high".  */
3981       cache_size = 0;
3982       extend_dependency_caches (sched_max_luid, true);
3983     }
3984 
3985   if (global_p)
3986     {
3987       dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
3988                                    /* Allocate lists for one block at a time.  */
3989                                    insns_in_block);
3990       dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
3991                                    /* Allocate nodes for one block at a time.
3992                                       We assume that average insn has
3993                                       5 producers.  */
3994                                    5 * insns_in_block);
3995     }
3996 }
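
/* Worked example of the threshold above (hypothetical numbers): with
   sched_max_luid == 12000 and n_basic_blocks == 20, insns_in_block is
   12000 / 20 + 1 == 601, which exceeds 100 * 5, so the dependency caches
   are created; with more typical block sizes the caches are skipped and
   dependence lookups fall back to scanning the per-insn dep lists.  */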
3997 
3998 
3999 /* Create or extend (depending on CREATE_P) dependency caches to
4000    size N.  */
4001 void
4002 extend_dependency_caches (int n, bool create_p)
4003 {
4004   if (create_p || true_dependency_cache)
4005     {
4006       int i, luid = cache_size + n;
4007 
4008       true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
4009 					  luid);
4010       output_dependency_cache = XRESIZEVEC (bitmap_head,
4011 					    output_dependency_cache, luid);
4012       anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
4013 					  luid);
4014       control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
4015 					  luid);
4016 
4017       if (current_sched_info->flags & DO_SPECULATION)
4018         spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
4019 					    luid);
4020 
4021       for (i = cache_size; i < luid; i++)
4022 	{
4023 	  bitmap_initialize (&true_dependency_cache[i], 0);
4024 	  bitmap_initialize (&output_dependency_cache[i], 0);
4025 	  bitmap_initialize (&anti_dependency_cache[i], 0);
4026 	  bitmap_initialize (&control_dependency_cache[i], 0);
4027 
4028           if (current_sched_info->flags & DO_SPECULATION)
4029             bitmap_initialize (&spec_dependency_cache[i], 0);
4030 	}
4031       cache_size = luid;
4032     }
4033 }
4034 
4035 /* Finalize dependency information for the whole function.  */
4036 void
4037 sched_deps_finish (void)
4038 {
4039   gcc_assert (deps_pools_are_empty_p ());
4040   free_alloc_pool_if_empty (&dn_pool);
4041   free_alloc_pool_if_empty (&dl_pool);
4042   gcc_assert (dn_pool == NULL && dl_pool == NULL);
4043 
4044   h_d_i_d.release ();
4045   cache_size = 0;
4046 
4047   if (true_dependency_cache)
4048     {
4049       int i;
4050 
4051       for (i = 0; i < cache_size; i++)
4052 	{
4053 	  bitmap_clear (&true_dependency_cache[i]);
4054 	  bitmap_clear (&output_dependency_cache[i]);
4055 	  bitmap_clear (&anti_dependency_cache[i]);
4056 	  bitmap_clear (&control_dependency_cache[i]);
4057 
4058           if (sched_deps_info->generate_spec_deps)
4059             bitmap_clear (&spec_dependency_cache[i]);
4060 	}
4061       free (true_dependency_cache);
4062       true_dependency_cache = NULL;
4063       free (output_dependency_cache);
4064       output_dependency_cache = NULL;
4065       free (anti_dependency_cache);
4066       anti_dependency_cache = NULL;
4067       free (control_dependency_cache);
4068       control_dependency_cache = NULL;
4069 
4070       if (sched_deps_info->generate_spec_deps)
4071         {
4072           free (spec_dependency_cache);
4073           spec_dependency_cache = NULL;
4074         }
4075 
4076     }
4077 }
4078 
4079 /* Initialize some global variables needed by the dependency analysis
4080    code.  */
4081 
4082 void
4083 init_deps_global (void)
4084 {
4085   CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
4086   CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
4087   reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
4088   reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
4089   reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
4090   reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
4091   reg_pending_barrier = NOT_A_BARRIER;
4092 
4093   if (!sel_sched_p () || sched_emulate_haifa_p)
4094     {
4095       sched_deps_info->start_insn = haifa_start_insn;
4096       sched_deps_info->finish_insn = haifa_finish_insn;
4097 
4098       sched_deps_info->note_reg_set = haifa_note_reg_set;
4099       sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
4100       sched_deps_info->note_reg_use = haifa_note_reg_use;
4101 
4102       sched_deps_info->note_mem_dep = haifa_note_mem_dep;
4103       sched_deps_info->note_dep = haifa_note_dep;
4104    }
4105 }
4106 
4107 /* Free everything used by the dependency analysis code.  */
4108 
4109 void
4110 finish_deps_global (void)
4111 {
4112   FREE_REG_SET (reg_pending_sets);
4113   FREE_REG_SET (reg_pending_clobbers);
4114   FREE_REG_SET (reg_pending_uses);
4115   FREE_REG_SET (reg_pending_control_uses);
4116 }
4117 
4118 /* Estimate the weakness of dependence between MEM1 and MEM2.  */
4119 dw_t
4120 estimate_dep_weak (rtx mem1, rtx mem2)
4121 {
4122   rtx r1, r2;
4123 
4124   if (mem1 == mem2)
4125     /* MEMs are the same - don't speculate.  */
4126     return MIN_DEP_WEAK;
4127 
4128   r1 = XEXP (mem1, 0);
4129   r2 = XEXP (mem2, 0);
4130 
4131   if (r1 == r2
4132       || (REG_P (r1) && REG_P (r2)
4133 	  && REGNO (r1) == REGNO (r2)))
4134     /* Again, MEMs are the same.  */
4135     return MIN_DEP_WEAK;
4136   else if ((REG_P (r1) && !REG_P (r2))
4137 	   || (!REG_P (r1) && REG_P (r2)))
4138     /* Different addressing modes - a reason to be more speculative
4139        than usual.  */
4140     return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4141   else
4142     /* We can't say anything about the dependence.  */
4143     return UNCERTAIN_DEP_WEAK;
4144 }
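
/* For example (illustrative): if MEM1 is (mem (reg A)) and MEM2 is
   (mem (plus (reg B) (const_int 8))), exactly one address is a bare
   register, so the result lands halfway between UNCERTAIN_DEP_WEAK and
   NO_DEP_WEAK; if both addresses are (reg A), the MEMs are considered
   the same and MIN_DEP_WEAK ("don't speculate") is returned.  */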
4145 
4146 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4147    This function can handle same INSN and ELEM (INSN == ELEM).
4148    It is a convenience wrapper.  */
4149 static void
4150 add_dependence_1 (rtx insn, rtx elem, enum reg_note dep_type)
4151 {
4152   ds_t ds;
4153   bool internal;
4154 
4155   if (dep_type == REG_DEP_TRUE)
4156     ds = DEP_TRUE;
4157   else if (dep_type == REG_DEP_OUTPUT)
4158     ds = DEP_OUTPUT;
4159   else if (dep_type == REG_DEP_CONTROL)
4160     ds = DEP_CONTROL;
4161   else
4162     {
4163       gcc_assert (dep_type == REG_DEP_ANTI);
4164       ds = DEP_ANTI;
4165     }
4166 
4167   /* When add_dependence is called from inside sched-deps.c, we expect
4168      cur_insn to be non-null.  */
4169   internal = cur_insn != NULL;
4170   if (internal)
4171     gcc_assert (insn == cur_insn);
4172   else
4173     cur_insn = insn;
4174 
4175   note_dep (elem, ds);
4176   if (!internal)
4177     cur_insn = NULL;
4178 }
4179 
4180 /* Return weakness of speculative type TYPE in the dep_status DS.  */
4181 dw_t
4182 get_dep_weak_1 (ds_t ds, ds_t type)
4183 {
4184   ds = ds & type;
4185 
4186   switch (type)
4187     {
4188     case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4189     case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4190     case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4191     case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4192     default: gcc_unreachable ();
4193     }
4194 
4195   return (dw_t) ds;
4196 }
4197 
4198 dw_t
4199 get_dep_weak (ds_t ds, ds_t type)
4200 {
4201   dw_t dw = get_dep_weak_1 (ds, type);
4202 
4203   gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4204   return dw;
4205 }
4206 
4207 /* Return a dep_status that is the same as DS, except that speculative
4208    type TYPE will have weakness DW.  */
4209 ds_t
4210 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4211 {
4212   gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4213 
4214   ds &= ~type;
4215   switch (type)
4216     {
4217     case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4218     case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4219     case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4220     case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4221     default: gcc_unreachable ();
4222     }
4223   return ds;
4224 }
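
/* The weakness of each speculative type occupies its own bit range of the
   ds_t, so setting a weakness and reading it back round-trips (an
   illustrative property, for any weakness W in [MIN_DEP_WEAK, MAX_DEP_WEAK]):

       ds_t ds = set_dep_weak (0, BEGIN_DATA, W);
       gcc_assert (get_dep_weak (ds, BEGIN_DATA) == W);
 */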
4225 
4226 /* Return the join of two dep_statuses DS1 and DS2.
4227    If MAX_P is true then choose the greater probability,
4228    otherwise multiply probabilities.
4229    This function assumes that both DS1 and DS2 contain speculative bits.  */
4230 static ds_t
4231 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
4232 {
4233   ds_t ds, t;
4234 
4235   gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
4236 
4237   ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
4238 
4239   t = FIRST_SPEC_TYPE;
4240   do
4241     {
4242       if ((ds1 & t) && !(ds2 & t))
4243 	ds |= ds1 & t;
4244       else if (!(ds1 & t) && (ds2 & t))
4245 	ds |= ds2 & t;
4246       else if ((ds1 & t) && (ds2 & t))
4247 	{
4248 	  dw_t dw1 = get_dep_weak (ds1, t);
4249 	  dw_t dw2 = get_dep_weak (ds2, t);
4250 	  ds_t dw;
4251 
4252 	  if (!max_p)
4253 	    {
4254 	      dw = ((ds_t) dw1) * ((ds_t) dw2);
4255 	      dw /= MAX_DEP_WEAK;
4256 	      if (dw < MIN_DEP_WEAK)
4257 		dw = MIN_DEP_WEAK;
4258 	    }
4259 	  else
4260 	    {
4261 	      if (dw1 >= dw2)
4262 		dw = dw1;
4263 	      else
4264 		dw = dw2;
4265 	    }
4266 
4267 	  ds = set_dep_weak (ds, t, (dw_t) dw);
4268 	}
4269 
4270       if (t == LAST_SPEC_TYPE)
4271 	break;
4272       t <<= SPEC_TYPE_SHIFT;
4273     }
4274   while (1);
4275 
4276   return ds;
4277 }
4278 
4279 /* Return the join of two dep_statuses DS1 and DS2.
4280    This function assumes that both DS1 and DS2 contain speculative bits.  */
4281 ds_t
4282 ds_merge (ds_t ds1, ds_t ds2)
4283 {
4284   return ds_merge_1 (ds1, ds2, false);
4285 }
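
/* Worked example of the non-MAX_P merge (with a hypothetical MAX_DEP_WEAK
   of 64): if both statuses carry BEGIN_DATA with weaknesses 32 and 32
   (each roughly a 1/2 chance that the dependence is absent), the merged
   weakness is 32 * 32 / 64 == 16, i.e. the probabilities are multiplied;
   ds_max_merge below would instead keep the larger of the two, 32.  */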
4286 
4287 /* Return the join of two dep_statuses DS and DS2.  */
4288 ds_t
4289 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4290 {
4291   ds_t new_status = ds | ds2;
4292 
4293   if (new_status & SPECULATIVE)
4294     {
4295       if ((ds && !(ds & SPECULATIVE))
4296 	  || (ds2 && !(ds2 & SPECULATIVE)))
4297 	/* Then this dep can't be speculative.  */
4298 	new_status &= ~SPECULATIVE;
4299       else
4300 	{
4301 	  /* Both are speculative.  Merge the probabilities.  */
4302 	  if (mem1)
4303 	    {
4304 	      dw_t dw;
4305 
4306 	      dw = estimate_dep_weak (mem1, mem2);
4307 	      ds = set_dep_weak (ds, BEGIN_DATA, dw);
4308 	    }
4309 
4310 	  if (!ds)
4311 	    new_status = ds2;
4312 	  else if (!ds2)
4313 	    new_status = ds;
4314 	  else
4315 	    new_status = ds_merge (ds2, ds);
4316 	}
4317     }
4318 
4319   return new_status;
4320 }
4321 
4322 /* Return the join of DS1 and DS2.  Use maximum instead of multiplying
4323    probabilities.  */
4324 ds_t
4325 ds_max_merge (ds_t ds1, ds_t ds2)
4326 {
4327   if (ds1 == 0 && ds2 == 0)
4328     return 0;
4329 
4330   if (ds1 == 0 && ds2 != 0)
4331     return ds2;
4332 
4333   if (ds1 != 0 && ds2 == 0)
4334     return ds1;
4335 
4336   return ds_merge_1 (ds1, ds2, true);
4337 }
4338 
4339 /* Return the probability of speculation success for the speculation
4340    status DS.  */
4341 dw_t
4342 ds_weak (ds_t ds)
4343 {
4344   ds_t res = 1, dt;
4345   int n = 0;
4346 
4347   dt = FIRST_SPEC_TYPE;
4348   do
4349     {
4350       if (ds & dt)
4351 	{
4352 	  res *= (ds_t) get_dep_weak (ds, dt);
4353 	  n++;
4354 	}
4355 
4356       if (dt == LAST_SPEC_TYPE)
4357 	break;
4358       dt <<= SPEC_TYPE_SHIFT;
4359     }
4360   while (1);
4361 
4362   gcc_assert (n);
4363   while (--n)
4364     res /= MAX_DEP_WEAK;
4365 
4366   if (res < MIN_DEP_WEAK)
4367     res = MIN_DEP_WEAK;
4368 
4369   gcc_assert (res <= MAX_DEP_WEAK);
4370 
4371   return (dw_t) res;
4372 }
4373 
4374 /* Return a dep status that contains all speculation types of DS.  */
4375 ds_t
4376 ds_get_speculation_types (ds_t ds)
4377 {
4378   if (ds & BEGIN_DATA)
4379     ds |= BEGIN_DATA;
4380   if (ds & BE_IN_DATA)
4381     ds |= BE_IN_DATA;
4382   if (ds & BEGIN_CONTROL)
4383     ds |= BEGIN_CONTROL;
4384   if (ds & BE_IN_CONTROL)
4385     ds |= BE_IN_CONTROL;
4386 
4387   return ds & SPECULATIVE;
4388 }
4389 
4390 /* Return a dep status that contains maximal weakness for each speculation
4391    type present in DS.  */
4392 ds_t
4393 ds_get_max_dep_weak (ds_t ds)
4394 {
4395   if (ds & BEGIN_DATA)
4396     ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4397   if (ds & BE_IN_DATA)
4398     ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4399   if (ds & BEGIN_CONTROL)
4400     ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4401   if (ds & BE_IN_CONTROL)
4402     ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4403 
4404   return ds;
4405 }
4406 
4407 /* Dump information about the dependence status S.  */
4408 static void
4409 dump_ds (FILE *f, ds_t s)
4410 {
4411   fprintf (f, "{");
4412 
4413   if (s & BEGIN_DATA)
4414     fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4415   if (s & BE_IN_DATA)
4416     fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4417   if (s & BEGIN_CONTROL)
4418     fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4419   if (s & BE_IN_CONTROL)
4420     fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4421 
4422   if (s & HARD_DEP)
4423     fprintf (f, "HARD_DEP; ");
4424 
4425   if (s & DEP_TRUE)
4426     fprintf (f, "DEP_TRUE; ");
4427   if (s & DEP_OUTPUT)
4428     fprintf (f, "DEP_OUTPUT; ");
4429   if (s & DEP_ANTI)
4430     fprintf (f, "DEP_ANTI; ");
4431   if (s & DEP_CONTROL)
4432     fprintf (f, "DEP_CONTROL; ");
4433 
4434   fprintf (f, "}");
4435 }
4436 
4437 DEBUG_FUNCTION void
4438 debug_ds (ds_t s)
4439 {
4440   dump_ds (stderr, s);
4441   fprintf (stderr, "\n");
4442 }
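
/* For instance (illustrative), for a data-speculative true dependence whose
   BEGIN_DATA weakness happens to be 40, debug_ds prints

       {BEGIN_DATA: 40; DEP_TRUE; }

   followed by a newline.  */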
4443 
4444 #ifdef ENABLE_CHECKING
4445 /* Verify that dependence type and status are consistent.
4446    If RELAXED_P is true, then skip dep_weakness checks.  */
4447 static void
4448 check_dep (dep_t dep, bool relaxed_p)
4449 {
4450   enum reg_note dt = DEP_TYPE (dep);
4451   ds_t ds = DEP_STATUS (dep);
4452 
4453   gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4454 
4455   if (!(current_sched_info->flags & USE_DEPS_LIST))
4456     {
4457       gcc_assert (ds == 0);
4458       return;
4459     }
4460 
4461   /* Check that dependence type contains the same bits as the status.  */
4462   if (dt == REG_DEP_TRUE)
4463     gcc_assert (ds & DEP_TRUE);
4464   else if (dt == REG_DEP_OUTPUT)
4465     gcc_assert ((ds & DEP_OUTPUT)
4466 		&& !(ds & DEP_TRUE));
4467   else if (dt == REG_DEP_ANTI)
4468     gcc_assert ((ds & DEP_ANTI)
4469 		&& !(ds & (DEP_OUTPUT | DEP_TRUE)));
4470   else
4471     gcc_assert (dt == REG_DEP_CONTROL
4472 		&& (ds & DEP_CONTROL)
4473 		&& !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
4474 
4475   /* HARD_DEP cannot appear in the dep_status of a link.  */
4476   gcc_assert (!(ds & HARD_DEP));
4477 
4478   /* Check that dependence status is set correctly when speculation is not
4479      supported.  */
4480   if (!sched_deps_info->generate_spec_deps)
4481     gcc_assert (!(ds & SPECULATIVE));
4482   else if (ds & SPECULATIVE)
4483     {
4484       if (!relaxed_p)
4485 	{
4486 	  ds_t type = FIRST_SPEC_TYPE;
4487 
4488 	  /* Check that dependence weakness is in proper range.  */
4489 	  do
4490 	    {
4491 	      if (ds & type)
4492 		get_dep_weak (ds, type);
4493 
4494 	      if (type == LAST_SPEC_TYPE)
4495 		break;
4496 	      type <<= SPEC_TYPE_SHIFT;
4497 	    }
4498 	  while (1);
4499 	}
4500 
4501       if (ds & BEGIN_SPEC)
4502 	{
4503 	  /* Only true dependence can be data speculative.  */
4504 	  if (ds & BEGIN_DATA)
4505 	    gcc_assert (ds & DEP_TRUE);
4506 
4507 	  /* Control dependencies in the insn scheduler are represented by
4508 	     anti-dependencies, therefore only anti dependence can be
4509 	     control speculative.  */
4510 	  if (ds & BEGIN_CONTROL)
4511 	    gcc_assert (ds & DEP_ANTI);
4512 	}
4513       else
4514 	{
4515 	  /* Subsequent speculations should resolve true dependencies.  */
4516 	  gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4517 	}
4518 
4519       /* Check that true and anti dependencies can't have other speculative
4520 	 statuses.  */
4521       if (ds & DEP_TRUE)
4522 	gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4523       /* An output dependence can't be speculative at all.  */
4524       gcc_assert (!(ds & DEP_OUTPUT));
4525       if (ds & DEP_ANTI)
4526 	gcc_assert (ds & BEGIN_CONTROL);
4527     }
4528 }
4529 #endif /* ENABLE_CHECKING */
4530 
4531 /* The following code discovers opportunities to switch a memory reference
4532    and an increment by modifying the address.  We ensure that this is done
4533    only for dependencies that represent a single register dependence
4534    (tracked via DEP_NONREG and DEP_MULTIPLE), and that every memory
4535    instruction involved is subject to only one dep that can cause a
4536    pattern change.
4537 
4538    When we discover a suitable dependency, we fill in the dep_replacement
4539    structure to show how to modify the memory reference.  */
4540 
4541 /* Holds information about a pair of memory reference and register increment
4542    insns which depend on each other, but could possibly be interchanged.  */
4543 struct mem_inc_info
4544 {
4545   rtx inc_insn;
4546   rtx mem_insn;
4547 
4548   rtx *mem_loc;
4549   /* A register occurring in the memory address for which we wish to break
4550      the dependence.  This must be identical to the destination register of
4551      the increment.  */
4552   rtx mem_reg0;
4553   /* Any kind of index that is added to that register.  */
4554   rtx mem_index;
4555   /* The constant offset used in the memory address.  */
4556   HOST_WIDE_INT mem_constant;
4557   /* The constant added in the increment insn.  Negated if the increment is
4558      after the memory address.  */
4559   HOST_WIDE_INT inc_constant;
4560   /* The source register used in the increment.  May be different from mem_reg0
4561      if the increment occurs before the memory address.  */
4562   rtx inc_input;
4563 };
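
/* Worked example (hypothetical insns): for the pair

       mem insn:  ... (mem (plus (reg R) (const_int 8))) ...
       inc insn:  (set (reg R) (plus (reg R) (const_int 16)))

   with the increment occurring after the memory reference, find_mem and
   parse_add_or_inc below fill in mem_reg0 = (reg R), mem_index = NULL_RTX,
   mem_constant = 8, inc_input = (reg R) and inc_constant = -16 (negated
   because the increment follows the MEM).  find_inc then proposes the
   replacement address (plus (reg R) (const_int -8)), i.e. inc_input plus
   mem_constant + inc_constant, so the MEM still refers to the same location
   if the increment is scheduled first.  */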
4564 
4565 /* Verify that the memory location described in MII can be replaced with
4566    one using NEW_ADDR.  Return the new memory reference or NULL_RTX.  The
4567    insn remains unchanged by this function.  */
4568 
4569 static rtx
4570 attempt_change (struct mem_inc_info *mii, rtx new_addr)
4571 {
4572   rtx mem = *mii->mem_loc;
4573   rtx new_mem;
4574 
4575   /* Jump through a lot of hoops to keep the attributes up to date.  We
4576      do not want to call one of the change address variants that take
4577      an offset even though we know the offset in many cases.  These
4578      assume you are changing where the address is pointing by the
4579      offset.  */
4580   new_mem = replace_equiv_address_nv (mem, new_addr);
4581   if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
4582     {
4583       if (sched_verbose >= 5)
4584 	fprintf (sched_dump, "validation failure\n");
4585       return NULL_RTX;
4586     }
4587 
4588   /* Put back the old one.  */
4589   validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
4590 
4591   return new_mem;
4592 }
4593 
4594 /* Return true if INSN is of a form "a = b op c" where a and b are
4595    regs.  op is + if c is a reg and +|- if c is a const.  Fill in
4596    information in MII about what is found.
4597    BEFORE_MEM indicates whether the increment is found before or after
4598    a corresponding memory reference.  */
4599 
4600 static bool
4601 parse_add_or_inc (struct mem_inc_info *mii, rtx insn, bool before_mem)
4602 {
4603   rtx pat = single_set (insn);
4604   rtx src, cst;
4605   bool regs_equal;
4606 
4607   if (RTX_FRAME_RELATED_P (insn) || !pat)
4608     return false;
4609 
4610   /* Result must be single reg.  */
4611   if (!REG_P (SET_DEST (pat)))
4612     return false;
4613 
4614   if (GET_CODE (SET_SRC (pat)) != PLUS)
4615     return false;
4616 
4617   mii->inc_insn = insn;
4618   src = SET_SRC (pat);
4619   mii->inc_input = XEXP (src, 0);
4620 
4621   if (!REG_P (XEXP (src, 0)))
4622     return false;
4623 
4624   if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
4625     return false;
4626 
4627   cst = XEXP (src, 1);
4628   if (!CONST_INT_P (cst))
4629     return false;
4630   mii->inc_constant = INTVAL (cst);
4631 
4632   regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);
4633 
4634   if (!before_mem)
4635     {
4636       mii->inc_constant = -mii->inc_constant;
4637       if (!regs_equal)
4638 	return false;
4639     }
4640 
4641   if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
4642     {
4643       /* Note that the sign has already been reversed for !before_mem.  */
4644 #ifdef STACK_GROWS_DOWNWARD
4645       return mii->inc_constant > 0;
4646 #else
4647       return mii->inc_constant < 0;
4648 #endif
4649     }
4650   return true;
4651 }
4652 
4653 /* Once a suitable mem reference has been found and the corresponding data
4654    in MII has been filled in, this function is called to find a suitable
4655    add or inc insn involving the register we found in the memory
4656    reference.  */
4657 
4658 static bool
4659 find_inc (struct mem_inc_info *mii, bool backwards)
4660 {
4661   sd_iterator_def sd_it;
4662   dep_t dep;
4663 
4664   sd_it = sd_iterator_start (mii->mem_insn,
4665 			     backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
4666   while (sd_iterator_cond (&sd_it, &dep))
4667     {
4668       dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
4669       rtx pro = DEP_PRO (dep);
4670       rtx con = DEP_CON (dep);
4671       rtx inc_cand = backwards ? pro : con;
4672       if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
4673 	goto next;
4674       if (parse_add_or_inc (mii, inc_cand, backwards))
4675 	{
4676 	  struct dep_replacement *desc;
4677 	  df_ref *def_rec;
4678 	  rtx newaddr, newmem;
4679 
4680 	  if (sched_verbose >= 5)
4681 	    fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
4682 		     INSN_UID (mii->mem_insn), INSN_UID (inc_cand));
4683 
4684 	  /* Need to ensure that none of the operands of the inc
4685 	     instruction are assigned to by the mem insn.  */
4686 	  for (def_rec = DF_INSN_DEFS (mii->mem_insn); *def_rec; def_rec++)
4687 	    {
4688 	      df_ref def = *def_rec;
4689 	      if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
4690 		  || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
4691 		{
4692 		  if (sched_verbose >= 5)
4693 		    fprintf (sched_dump,
4694 			     "inc conflicts with store failure.\n");
4695 		  goto next;
4696 		}
4697 	    }
4698 	  newaddr = mii->inc_input;
4699 	  if (mii->mem_index != NULL_RTX)
4700 	    newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
4701 				    mii->mem_index);
4702 	  newaddr = plus_constant (GET_MODE (newaddr), newaddr,
4703 				   mii->mem_constant + mii->inc_constant);
4704 	  newmem = attempt_change (mii, newaddr);
4705 	  if (newmem == NULL_RTX)
4706 	    goto next;
4707 	  if (sched_verbose >= 5)
4708 	    fprintf (sched_dump, "successful address replacement\n");
4709 	  desc = XCNEW (struct dep_replacement);
4710 	  DEP_REPLACE (dep) = desc;
4711 	  desc->loc = mii->mem_loc;
4712 	  desc->newval = newmem;
4713 	  desc->orig = *desc->loc;
4714 	  desc->insn = mii->mem_insn;
4715 	  move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
4716 			 INSN_SPEC_BACK_DEPS (con));
4717 	  if (backwards)
4718 	    {
4719 	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
4720 		add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
4721 				  REG_DEP_TRUE);
4722 	    }
4723 	  else
4724 	    {
4725 	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
4726 		add_dependence_1 (DEP_CON (dep), mii->mem_insn,
4727 				  REG_DEP_ANTI);
4728 	    }
4729 	  return true;
4730 	}
4731     next:
4732       sd_iterator_next (&sd_it);
4733     }
4734   return false;
4735 }
4736 
4737 /* A recursive function that walks ADDRESS_OF_X to find memory references
4738    which could be modified during scheduling.  We call find_inc for each
4739    one we find that has a recognizable form.  MII holds information about
4740    the pair of memory/increment instructions.
4741    We ensure that every instruction with a memory reference (which will be
4742    the location of the replacement) is assigned at most one breakable
4743    dependency.  */
4744 
4745 static bool
4746 find_mem (struct mem_inc_info *mii, rtx *address_of_x)
4747 {
4748   rtx x = *address_of_x;
4749   enum rtx_code code = GET_CODE (x);
4750   const char *const fmt = GET_RTX_FORMAT (code);
4751   int i;
4752 
4753   if (code == MEM)
4754     {
4755       rtx reg0 = XEXP (x, 0);
4756 
4757       mii->mem_loc = address_of_x;
4758       mii->mem_index = NULL_RTX;
4759       mii->mem_constant = 0;
4760       if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
4761 	{
4762 	  mii->mem_constant = INTVAL (XEXP (reg0, 1));
4763 	  reg0 = XEXP (reg0, 0);
4764 	}
4765       if (GET_CODE (reg0) == PLUS)
4766 	{
4767 	  mii->mem_index = XEXP (reg0, 1);
4768 	  reg0 = XEXP (reg0, 0);
4769 	}
4770       if (REG_P (reg0))
4771 	{
4772 	  df_ref *def_rec;
4773 	  int occurrences = 0;
4774 
4775 	  /* Make sure this reg appears only once in this insn.  Can't use
4776 	     count_occurrences since that only works for pseudos.  */
4777 	  for (def_rec = DF_INSN_USES (mii->mem_insn); *def_rec; def_rec++)
4778 	    {
4779 	      df_ref def = *def_rec;
4780 	      if (reg_overlap_mentioned_p (reg0, DF_REF_REG (def)))
4781 		if (++occurrences > 1)
4782 		  {
4783 		    if (sched_verbose >= 5)
4784 		      fprintf (sched_dump, "mem count failure\n");
4785 		    return false;
4786 		  }
4787 	    }
4788 
4789 	  mii->mem_reg0 = reg0;
4790 	  return find_inc (mii, true) || find_inc (mii, false);
4791 	}
4792       return false;
4793     }
4794 
4795   if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
4796     {
4797       /* If REG occurs inside a MEM used in a bit-field reference,
4798 	 that is unacceptable.  */
4799       return false;
4800     }
4801 
4802   /* Time for some deep diving.  */
4803   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4804     {
4805       if (fmt[i] == 'e')
4806 	{
4807 	  if (find_mem (mii, &XEXP (x, i)))
4808 	    return true;
4809 	}
4810       else if (fmt[i] == 'E')
4811 	{
4812 	  int j;
4813 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4814 	    if (find_mem (mii, &XVECEXP (x, i, j)))
4815 	      return true;
4816 	}
4817     }
4818   return false;
4819 }
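
/* Decomposition example (hypothetical address): for a MEM whose address is
   (plus (plus (reg BASE) (reg INDEX)) (const_int 12)), the code above
   records mem_constant = 12, mem_index = (reg INDEX) and mem_reg0 =
   (reg BASE), provided BASE is mentioned only once among the uses of the
   memory insn; find_inc is then tried both backwards and forwards for an
   increment of BASE.  */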
4820 
4821 
4822 /* Examine the instructions between HEAD and TAIL and try to find
4823    dependencies that can be broken by modifying one of the patterns.  */
4824 
4825 void
4826 find_modifiable_mems (rtx head, rtx tail)
4827 {
4828   rtx insn, next_tail = NEXT_INSN (tail);
4829   int success_in_block = 0;
4830 
4831   for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
4832     {
4833       struct mem_inc_info mii;
4834 
4835       if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
4836 	continue;
4837 
4838       mii.mem_insn = insn;
4839       if (find_mem (&mii, &PATTERN (insn)))
4840 	success_in_block++;
4841     }
4842   if (success_in_block && sched_verbose >= 5)
4843     fprintf (sched_dump, "%d candidates for address modification found.\n",
4844 	     success_in_block);
4845 }
4846 
4847 #endif /* INSN_SCHEDULING */
4848