/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992-2016 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "insn-config.h"
#include "regs.h"
#include "ira.h"
#include "ira-int.h"
#include "insn-attr.h"
#include "cfgbuild.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"

#ifdef INSN_SCHEDULING

/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
vec<haifa_deps_insn_data_def> h_d_i_d = vNULL;

/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  if (ds & DEP_CONTROL)
    return REG_DEP_CONTROL;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}

/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  switch (dk)
    {
    case REG_DEP_TRUE:
      return DEP_TRUE;

    case REG_DEP_OUTPUT:
      return DEP_OUTPUT;

    case REG_DEP_CONTROL:
      return DEP_CONTROL;

    default:
      gcc_assert (dk == REG_DEP_ANTI);
      return DEP_ANTI;
    }
}
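
/* For illustration only, a sketch (not called anywhere in the scheduler):
   dk_to_ds and ds_to_dk invert each other on the major dependence types,
   with ds_to_dk picking the strongest type when several status bits are
   set:

     ds_t ds = dk_to_ds (REG_DEP_OUTPUT);
     gcc_assert (ds_to_dk (ds) == REG_DEP_OUTPUT);  */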

/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
void
init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
  DEP_COST (dep) = UNKNOWN_DEP_COST;
  DEP_NONREG (dep) = 0;
  DEP_MULTIPLE (dep) = 0;
  DEP_REPLACE (dep) = NULL;
}

/* Init DEP with the arguments.
   While most of the scheduler (including targets) only needs the major type
   of the dependency, it is convenient to hide the full dep_status from it.  */
void
init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);
  else
    ds = 0;

  init_dep_1 (dep, pro, con, kind, ds);
}
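
/* For illustration only, a sketch of the usual calling convention: deps
   are built on the stack and copied into pool-allocated nodes when they
   are registered, so a caller typically writes (PRO and CON being
   hypothetical insns):

     dep_def _dep, *dep = &_dep;

     init_dep (dep, pro, con, REG_DEP_TRUE);
     sd_add_or_update_dep (dep, false);

   sd_copy_back_deps below uses the same pattern.  */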

/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}

static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE	\
		      |DUMP_DEP_STATUS)

/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
	{
	case REG_DEP_TRUE:
	  t = 't';
	  break;

	case REG_DEP_OUTPUT:
	  t = 'o';
	  break;

	case REG_DEP_CONTROL:
	  t = 'c';
	  break;

	case REG_DEP_ANTI:
	  t = 'a';
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
	dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}

/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}

/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */

static inline bool
depl_on_debug_p (dep_link_t dep)
{
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
}

/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXTP.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
	      && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}
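
/* For illustration: a deps_list is a singly linked list threaded through
   DEP_LINK_NEXT, where every link also records the address of the pointer
   that points to it (DEP_LINK_PREV_NEXTP).  For a list FIRST -> A -> B
   this means

     DEP_LINK_PREV_NEXTP (A) == &DEPS_LIST_FIRST (l)
     DEP_LINK_PREV_NEXTP (B) == &DEP_LINK_NEXT (A)

   which is what lets detach_dep_link below unlink a node in O(1)
   without walking the list from its head.  */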

/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);
}

/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}

/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);
}

/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}

/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}

/* Pool to hold all dependency nodes (dep_node_t).  */
static object_allocator<_dep_node> *dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;

/* Create a dep_node.  */
static dep_node_t
create_dep_node (void)
{
  dep_node_t n = dn_pool->allocate ();
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  ++dn_pool_diff;

  return n;
}

/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));

  --dn_pool_diff;

  dn_pool->remove (n);
}

/* Pool to hold dependencies lists (deps_list_t).  */
static object_allocator<_deps_list> *dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;

/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}

/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = dl_pool->allocate ();

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}

/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  dl_pool->remove (l);
}

/* Return true if there are no dep_nodes or deps_lists out there.
   After the region is scheduled, all the dependency nodes and lists
   should [generally] be returned to the pools.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}

/* Remove all elements from L.  */
static void
clear_deps_list (deps_list_t l)
{
  do
    {
      dep_link_t link = DEPS_LIST_FIRST (l);

      if (link == NULL)
	break;

      remove_from_deps_list (link, l);
    }
  while (1);
}

/* Decide whether a dependency should be treated as a hard or a speculative
   dependency.  */
static bool
dep_spec_p (dep_t dep)
{
  if (current_sched_info->flags & DO_SPECULATION)
    {
      if (DEP_STATUS (dep) & SPECULATIVE)
	return true;
    }
  if (current_sched_info->flags & DO_PREDICATION)
    {
      if (DEP_TYPE (dep) == REG_DEP_CONTROL)
	return true;
    }
  if (DEP_REPLACE (dep) != NULL)
    return true;
  return false;
}

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static regset reg_pending_control_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;

/* Hard registers implicitly clobbered or used (or that may be implicitly
   clobbered or used) by the currently analyzed insn.  For example, an
   insn may have a constraint whose register class contains a single hard
   register.  Even if there is currently no hard register in the insn,
   that particular hard register will appear in the insn after the reload
   pass because the constraint requires it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;

/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the cache for true dependencies is allocated,
   then the remaining caches are allocated as well.  */
static bitmap_head *true_dependency_cache = NULL;
static bitmap_head *output_dependency_cache = NULL;
static bitmap_head *anti_dependency_cache = NULL;
static bitmap_head *control_dependency_cache = NULL;
static bitmap_head *spec_dependency_cache = NULL;
static int cache_size;

/* True if we should mark added dependencies as non-register deps.  */
static bool mark_as_hard;

static int deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
				 enum reg_note, bool);
static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
					  rtx_insn_list **, int, enum reg_note,
					  bool);
static void delete_all_dependences (rtx_insn *);
static void chain_to_prev_insn (rtx_insn *);

static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);

static bool sched_has_condition_p (const rtx_insn *);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
							  rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

static void check_dep (dep_t, bool);


/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
	addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}


/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
	return 0;

      if (rev)
	*rev = true;
      return cond;
    }

  return 0;
}
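
/* For illustration, the jump shape handled above: a conditional branch
   is a set of the form

     (set (pc) (if_then_else (cond) (label_ref ...) (pc)))

   When the (pc) fall-through sits in the else arm, COND is returned
   directly; when the arms are swapped, COND must be read reversed,
   which *REV records.  */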

/* Return the condition under which INSN does not execute (i.e.  the
   not-taken condition for a conditional branch), or NULL if we cannot
   find such a condition.  The caller should make a copy of the condition
   before using it.  */
rtx
sched_get_reverse_condition_uncached (const rtx_insn *insn)
{
  bool rev;
  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
  if (cond == NULL_RTX)
    return cond;
  if (!rev)
    {
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
      cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
			     XEXP (cond, 0),
			     XEXP (cond, 1));
    }
  return cond;
}

/* Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
static rtx
sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
{
  bool tmp;

  if (INSN_LUID (insn) == 0)
    return sched_get_condition_with_rev_uncached (insn, rev);

  if (INSN_CACHED_COND (insn) == const_true_rtx)
    return NULL_RTX;

  if (INSN_CACHED_COND (insn) != NULL_RTX)
    {
      if (rev)
	*rev = INSN_REVERSE_COND (insn);
      return INSN_CACHED_COND (insn);
    }

  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
  INSN_REVERSE_COND (insn) = tmp;

  if (INSN_CACHED_COND (insn) == NULL_RTX)
    {
      INSN_CACHED_COND (insn) = const_true_rtx;
      return NULL_RTX;
    }

  if (rev)
    *rev = INSN_REVERSE_COND (insn);
  return INSN_CACHED_COND (insn);
}

/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const rtx_insn *insn)
{
  return !! sched_get_condition_with_rev (insn, NULL);
}



/* Return nonzero if conditions COND1 and COND2 can never both be true.  */
static int
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) ==
	  (rev1==rev2
	  ? reversed_comparison_code (cond2, NULL)
	  : GET_CODE (cond2))
      && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}

/* Return true if INSN1 and INSN2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
	return true;
    }
  return false;
}
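
/* For illustration, a hypothetical pair that this test lets through
   without a dependence on a conditional-execution target:

     (cond_exec (eq (reg p1) (const_int 0)) (set (reg r1) ...))
     (cond_exec (ne (reg p1) (const_int 0)) (set (reg r1) ...))

   Both write r1, but the EQ/NE guards on the same operands can never
   both be true, so no output dependence is needed.  */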


/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* The following instructions, which depend on a speculatively scheduled
       instruction, cannot be speculatively scheduled along with it.  */
    {
      if (may_trap_or_fault_p (PATTERN (insn)))
	/* If the instruction might fault, it cannot be speculatively
	   scheduled.  For control speculation it's obvious why and for
	   data speculation it's because the insn might get wrong input
	   if speculation wasn't successful.  */
	return false;

      if ((ds & BE_IN_DATA)
	  && sched_has_condition_p (insn))
	/* If this is a predicated instruction, then it cannot be
	   speculatively scheduled.  See PR35659.  */
	return false;
    }

  return true;
}

/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of the returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
	      deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}

/* Return the total size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (list)
	size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}

/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */

bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
	return false;
    }

  return true;
}
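
/* For illustration, the usual way to walk these lists is through the
   sd_iterator interface; e.g. a sketch that counts the true backward
   dependencies of INSN:

     sd_iterator_def sd_it;
     dep_t dep;
     int n = 0;

     FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
       if (DEP_TYPE (dep) == REG_DEP_TRUE)
	 n++;

   SD_LIST_BACK here stands for both the hard and the speculative
   backward lists.  */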

/* Initialize data for INSN.  */
void
sd_init_insn (rtx_insn *insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */
}

/* Free data for INSN.  */
void
sd_finish_insn (rtx_insn *insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
			      sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either the back list of CON or the forw list of PRO,
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
	if (DEP_PRO (dep) == pro)
	  {
	    found_p = true;
	    break;
	  }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
	if (DEP_CON (dep) == con)
	  {
	    found_p = true;
	    break;
	  }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
	*sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches [if available] to check whether the dependency
   is present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency if one is found, or NULL otherwise.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}
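
/* For illustration, a hypothetical caller testing whether CON already
   depends on PRO before creating a new dependence:

     if (sd_find_dep_between (pro, con, false) == NULL)
       {
	 dep_def _dep, *dep = &_dep;

	 init_dep (dep, pro, con, REG_DEP_ANTI);
	 sd_add_dep (dep, false);
       }

   (sd_add_or_update_dep below performs this find-or-update logic
   internally.)  */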

/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating whether an old entry has been
   changed or a new entry has been added to insn's backward deps.

   This function merely checks whether producer and consumer are the same
   insn and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
        /* INSN has an internal dependence, which we can't overcome.  */
        HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}

/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
	      && output_dependency_cache != NULL
	      && anti_dependency_cache != NULL
	      && control_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_ANTI;
      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_CONTROL;
      else
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
	/* DEP does not add anything to the existing dependence.  */
	return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_ANTI;
      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_CONTROL;

      if (present_dep_types == 0)
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
	{
	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
	      == present_dep_types)
	    /* DEP does not add anything to the existing dependence.  */
	    return DEP_PRESENT;
	}
      else
	{
	  /* Only true dependencies can be data speculative and
	     only anti dependencies can be control speculative.  */
	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
		      == present_dep_types);

	  /* if (DEP is SPECULATIVE) then
	     ..we should update DEP_STATUS
	     else
	     ..we should reset existing dep to non-speculative.  */
	}
    }

  return DEP_CHANGED;
}

/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
	{
	case REG_DEP_TRUE:
	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_OUTPUT:
	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_CONTROL)
	bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
	{
	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
	}
    }
}

/* The type of dependence DEP has changed from OLD_TYPE.  Update the
   dependency caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear the corresponding cache entry because the type of the link
     may have changed.  Keep the entries if we use_deps_list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
	{
	case REG_DEP_OUTPUT:
	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  set_dependency_caches (dep);
}

/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
		      INSN_LUID (elem));
}

/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case a
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
	    rtx mem1 ATTRIBUTE_UNUSED,
	    rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  bool was_spec = dep_spec_p (dep);

  DEP_NONREG (dep) |= DEP_NONREG (new_dep);
  DEP_MULTIPLE (dep) = 1;

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	{
	  /* Either the existing dep, the dep we're adding, or both are
	     speculative.  */
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    new_status &= ~SPECULATIVE;
	  else
	    {
	      /* Both are speculative.  Merge probabilities.  */
	      if (mem1 != NULL)
		{
		  dw_t dw;

		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
		}

	      new_status = ds_merge (dep_status, ds);
	    }
	}

      ds = new_status;

      if (dep_status != ds)
	{
	  DEP_STATUS (dep) = ds;
	  res = DEP_CHANGED;
	}
    }

  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  */
    change_spec_dep_to_hard (sd_it);

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}

/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating whether an old entry has been
   changed, a new entry has been added to insn's backward deps, or nothing
   has been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

  if (flag_checking)
    check_dep (new_dep, mem1 != NULL);

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
	{
	case DEP_PRESENT:
	  dep_t present_dep;
	  sd_iterator_def sd_it;

	  present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						      DEP_CON (new_dep),
						      resolved_p, &sd_it);
	  DEP_MULTIPLE (present_dep) = 1;
	  return DEP_PRESENT;

	case DEP_CHANGED:
	  maybe_present_p = true;
	  present_p = true;
	  break;

	case DEP_CREATED:
	  maybe_present_p = false;
	  present_p = false;
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
	/* We didn't find a dep, so it shouldn't be present in the cache.  */
	gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}

/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
			 deps_list_t *back_list_ptr,
			 deps_list_t *forw_list_ptr)
{
  rtx_insn *con = DEP_CON (dep);

  if (!resolved_p)
    {
      if (dep_spec_p (dep))
	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
	*back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}

/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION) == 0
      || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

  if (flag_checking)
    check_dep (dep, false);

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}

/* Add or update a backward dependence described by DEP.  This is a
   convenience wrapper around add_or_update_dep_1 for callers that have
   no memory references to pass along.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}

/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
		 INSN_RESOLVED_FORW_DEPS (pro));
}
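
/* For illustration, a hypothetical sketch of how a scheduling pass
   resolves every forward dependence of INSN once INSN has been issued:

     sd_iterator_def sd_it;
     dep_t dep;

     for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
	  sd_iterator_cond (&sd_it, &dep);)
       sd_resolve_dep (sd_it);

   The loop needs no explicit iterator step because moving the link off
   the list advances SD_IT.  */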

/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state.  */
void
sd_unresolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_SPEC_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_HARD_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
		 INSN_FORW_DEPS (pro));
}

/* Make TO depend on all of FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}

/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}

/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)

/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
	{
	  dump_dep (dump, dep, dump_dep_flags | all);
	  fprintf (dump, " ");
	}
    }
}

/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
void
sd_debug_lists (rtx insn, sd_list_types_def types)
{
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
}

/* A wrapper around add_dependence_1, to add a dependence of CON on
   PRO, with type DEP_TYPE.  This function implements special handling
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
   the type to REG_DEP_ANTI if we can determine that predication is
   impossible; otherwise we add additional true dependencies on the
   INSN_COND_DEPS list of the jump (which PRO must be).  */
void
add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
{
  if (dep_type == REG_DEP_CONTROL
      && !(current_sched_info->flags & DO_PREDICATION))
    dep_type = REG_DEP_ANTI;

  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
     so we must also make the insn dependent on the setter of the
     condition.  */
  if (dep_type == REG_DEP_CONTROL)
    {
      rtx_insn *real_pro = pro;
      rtx_insn *other = real_insn_for_shadow (real_pro);
      rtx cond;

      if (other != NULL_RTX)
	real_pro = other;
      cond = sched_get_reverse_condition_uncached (real_pro);
      /* Verify that the insn does not use a different value in
	 the condition register than the one that was present at
	 the jump.  */
      if (cond == NULL_RTX)
	dep_type = REG_DEP_ANTI;
      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
	{
	  HARD_REG_SET uses;
	  CLEAR_HARD_REG_SET (uses);
	  note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
	  if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
	    dep_type = REG_DEP_ANTI;
	}
      if (dep_type == REG_DEP_CONTROL)
	{
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "making DEP_CONTROL for %d\n",
		     INSN_UID (real_pro));
	  add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
			       REG_DEP_TRUE, false);
	}
    }

  add_dependence_1 (con, pro, dep_type);
}

/* A convenience wrapper to operate on an entire list.  HARD should be
   true if DEP_NONREG should be set on newly created dependencies.  */

static void
add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
		     enum reg_note dep_type, bool hard)
{
  mark_as_hard = hard;
  for (; list; list = list->next ())
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
	add_dependence (insn, list->insn (), dep_type);
    }
  mark_as_hard = false;
}

/* Similar, but free *LISTP at the same time, when the context
   is not readonly.  HARD should be true if DEP_NONREG should be set on
   newly created dependencies.  */

static void
add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
			      rtx_insn_list **listp,
                              int uncond, enum reg_note dep_type, bool hard)
{
  add_dependence_list (insn, *listp, uncond, dep_type, hard);

  /* We don't want to short-circuit dependencies involving debug
     insns, because they may cause actual dependencies to be
     disregarded.  */
  if (deps->readonly || DEBUG_INSN_P (insn))
    return;

  free_INSN_LIST_list (listp);
}

/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */

static int
remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
{
  int removed = 0;

  while (*listp)
    {
      if ((*listp)->insn () == insn)
        {
          remove_free_INSN_LIST_node (listp);
          removed++;
          continue;
        }

      listp = (rtx_insn_list **)&XEXP (*listp, 1);
    }

  return removed;
}

/* Same as above, but process two lists at once.  */
static int
remove_from_both_dependence_lists (rtx_insn *insn,
				   rtx_insn_list **listp,
				   rtx_expr_list **exprp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
        {
          remove_free_INSN_LIST_node (listp);
          remove_free_EXPR_LIST_node (exprp);
          removed++;
          continue;
        }

      listp = (rtx_insn_list **)&XEXP (*listp, 1);
      exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
    }

  return removed;
}

/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The loop below could be optimized to clear the caches and back_deps
     in one pass, but that would duplicate code from sd_delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}
1654 
1655 /* All insns in a scheduling group except the first should only have
1656    dependencies on the previous insn in the group.  So we find the
1657    first instruction in the scheduling group by walking the dependence
1658    chains backwards. Then we add the dependencies for the group to
1659    the previous nonnote insn.  */
1660 
1661 static void
chain_to_prev_insn(rtx_insn * insn)1662 chain_to_prev_insn (rtx_insn *insn)
1663 {
1664   sd_iterator_def sd_it;
1665   dep_t dep;
1666   rtx_insn *prev_nonnote;
1667 
1668   FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1669     {
1670       rtx_insn *i = insn;
1671       rtx_insn *pro = DEP_PRO (dep);
1672 
1673       do
1674 	{
1675 	  i = prev_nonnote_insn (i);
1676 
1677 	  if (pro == i)
1678 	    goto next_link;
1679 	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));
1680 
1681       if (! sched_insns_conditions_mutex_p (i, pro))
1682 	add_dependence (i, pro, DEP_TYPE (dep));
1683     next_link:;
1684     }
1685 
1686   delete_all_dependences (insn);
1687 
1688   prev_nonnote = prev_nonnote_nondebug_insn (insn);
1689   if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1690       && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1691     add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1692 }
1693 
1694 /* Process an insn's memory dependencies.  There are four kinds of
1695    dependencies:
1696 
1697    (0) read dependence: read follows read
1698    (1) true dependence: read follows write
1699    (2) output dependence: write follows write
1700    (3) anti dependence: write follows read
1701 
1702    We are careful to build only dependencies which actually exist, and
1703    use transitivity to avoid building too many links.  */
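
/* For example, given the schematic RTL sequence (A is a placeholder
   address, not real RTL):

       (set (mem:SI A) (reg:SI 1))   ;; insn 1: store to A
       (set (reg:SI 2) (mem:SI A))   ;; insn 2: load from A
       (set (mem:SI A) (reg:SI 3))   ;; insn 3: store to A

   insn 2 carries a true dependence on insn 1 (read follows write),
   while insn 3 carries an anti dependence on insn 2 (write follows
   read) and an output dependence on insn 1 (write follows write).  */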
1704 
1705 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1706    The MEM is a memory reference contained within INSN, which we are saving
1707    so that we can do memory aliasing on it.  */
1708 
1709 static void
1710 add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
1711 			 rtx_insn *insn, rtx mem)
1712 {
1713   rtx_insn_list **insn_list;
1714   rtx_insn_list *insn_node;
1715   rtx_expr_list **mem_list;
1716   rtx_expr_list *mem_node;
1717 
1718   gcc_assert (!deps->readonly);
1719   if (read_p)
1720     {
1721       insn_list = &deps->pending_read_insns;
1722       mem_list = &deps->pending_read_mems;
1723       if (!DEBUG_INSN_P (insn))
1724 	deps->pending_read_list_length++;
1725     }
1726   else
1727     {
1728       insn_list = &deps->pending_write_insns;
1729       mem_list = &deps->pending_write_mems;
1730       deps->pending_write_list_length++;
1731     }
1732 
1733   insn_node = alloc_INSN_LIST (insn, *insn_list);
1734   *insn_list = insn_node;
1735 
1736   if (sched_deps_info->use_cselib)
1737     {
1738       mem = shallow_copy_rtx (mem);
1739       XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
1740 							GET_MODE (mem), insn);
1741     }
1742   mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1743   *mem_list = mem_node;
1744 }
1745 
1746 /* Make a dependency between every memory reference on the pending lists
1747    and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
1748    dependencies for a read operation, similarly with FOR_WRITE.  */
1749 
1750 static void
1751 flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
1752 		     int for_write)
1753 {
1754   if (for_write)
1755     {
1756       add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1757                                     1, REG_DEP_ANTI, true);
1758       if (!deps->readonly)
1759         {
1760           free_EXPR_LIST_list (&deps->pending_read_mems);
1761           deps->pending_read_list_length = 0;
1762         }
1763     }
1764 
1765   add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1766 				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1767 				true);
1768 
1769   add_dependence_list_and_free (deps, insn,
1770                                 &deps->last_pending_memory_flush, 1,
1771                                 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1772 				true);
1773 
1774   add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
1775 				REG_DEP_ANTI, true);
1776 
1777   if (DEBUG_INSN_P (insn))
1778     {
1779       if (for_write)
1780 	free_INSN_LIST_list (&deps->pending_read_insns);
1781       free_INSN_LIST_list (&deps->pending_write_insns);
1782       free_INSN_LIST_list (&deps->last_pending_memory_flush);
1783       free_INSN_LIST_list (&deps->pending_jump_insns);
1784     }
1785 
1786   if (!deps->readonly)
1787     {
1788       free_EXPR_LIST_list (&deps->pending_write_mems);
1789       deps->pending_write_list_length = 0;
1790 
1791       deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1792       deps->pending_flush_length = 1;
1793     }
1794   mark_as_hard = false;
1795 }
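
/* For illustration: flushing replaces many pairwise memory dependencies
   with a single chokepoint.  If the pending lists held stores S1..Sn,
   each later memory reference would otherwise need a link to every Si;
   after the flush only LAST_PENDING_MEMORY_FLUSH = (INSN) remains, so
   later references depend on INSN alone and reach S1..Sn transitively
   through it.  */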
1796 
1797 /* The instruction whose dependencies we are currently analyzing.  */
1798 static rtx_insn *cur_insn = NULL;
1799 
1800 /* Implement hooks for haifa scheduler.  */
1801 
1802 static void
1803 haifa_start_insn (rtx_insn *insn)
1804 {
1805   gcc_assert (insn && !cur_insn);
1806 
1807   cur_insn = insn;
1808 }
1809 
1810 static void
1811 haifa_finish_insn (void)
1812 {
1813   cur_insn = NULL;
1814 }
1815 
1816 void
1817 haifa_note_reg_set (int regno)
1818 {
1819   SET_REGNO_REG_SET (reg_pending_sets, regno);
1820 }
1821 
1822 void
1823 haifa_note_reg_clobber (int regno)
1824 {
1825   SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1826 }
1827 
1828 void
1829 haifa_note_reg_use (int regno)
1830 {
1831   SET_REGNO_REG_SET (reg_pending_uses, regno);
1832 }
1833 
1834 static void
1835 haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
1836 {
1837   if (!(ds & SPECULATIVE))
1838     {
1839       mem = NULL_RTX;
1840       pending_mem = NULL_RTX;
1841     }
1842   else
1843     gcc_assert (ds & BEGIN_DATA);
1844 
1845   {
1846     dep_def _dep, *dep = &_dep;
1847 
1848     init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1849                 current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
1850     DEP_NONREG (dep) = 1;
1851     maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1852   }
1853 
1854 }
1855 
1856 static void
1857 haifa_note_dep (rtx_insn *elem, ds_t ds)
1858 {
1859   dep_def _dep;
1860   dep_t dep = &_dep;
1861 
1862   init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1863   if (mark_as_hard)
1864     DEP_NONREG (dep) = 1;
1865   maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1866 }
1867 
1868 static void
1869 note_reg_use (int r)
1870 {
1871   if (sched_deps_info->note_reg_use)
1872     sched_deps_info->note_reg_use (r);
1873 }
1874 
1875 static void
1876 note_reg_set (int r)
1877 {
1878   if (sched_deps_info->note_reg_set)
1879     sched_deps_info->note_reg_set (r);
1880 }
1881 
1882 static void
1883 note_reg_clobber (int r)
1884 {
1885   if (sched_deps_info->note_reg_clobber)
1886     sched_deps_info->note_reg_clobber (r);
1887 }
1888 
1889 static void
1890 note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
1891 {
1892   if (sched_deps_info->note_mem_dep)
1893     sched_deps_info->note_mem_dep (m1, m2, e, ds);
1894 }
1895 
1896 static void
1897 note_dep (rtx_insn *e, ds_t ds)
1898 {
1899   if (sched_deps_info->note_dep)
1900     sched_deps_info->note_dep (e, ds);
1901 }
1902 
1903 /* Return the reg_note corresponding to DS.  */
1904 enum reg_note
1905 ds_to_dt (ds_t ds)
1906 {
1907   if (ds & DEP_TRUE)
1908     return REG_DEP_TRUE;
1909   else if (ds & DEP_OUTPUT)
1910     return REG_DEP_OUTPUT;
1911   else if (ds & DEP_ANTI)
1912     return REG_DEP_ANTI;
1913   else
1914     {
1915       gcc_assert (ds & DEP_CONTROL);
1916       return REG_DEP_CONTROL;
1917     }
1918 }
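
/* For example, a dep_status with both DEP_TRUE and DEP_OUTPUT set maps
   to REG_DEP_TRUE: the cascade above gives true dependence priority
   over output, anti and control when several bits are present.  */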
1919 
1920 
1921 
1922 /* Functions for computation of info needed for register pressure
1923    sensitive insn scheduling.  */
1924 
1925 
1926 /* Allocate and return reg_use_data structure for REGNO and INSN.  */
1927 static struct reg_use_data *
1928 create_insn_reg_use (int regno, rtx_insn *insn)
1929 {
1930   struct reg_use_data *use;
1931 
1932   use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1933   use->regno = regno;
1934   use->insn = insn;
1935   use->next_insn_use = INSN_REG_USE_LIST (insn);
1936   INSN_REG_USE_LIST (insn) = use;
1937   return use;
1938 }
1939 
1940 /* Allocate reg_set_data structure for REGNO and INSN.  */
1941 static void
1942 create_insn_reg_set (int regno, rtx insn)
1943 {
1944   struct reg_set_data *set;
1945 
1946   set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1947   set->regno = regno;
1948   set->insn = insn;
1949   set->next_insn_set = INSN_REG_SET_LIST (insn);
1950   INSN_REG_SET_LIST (insn) = set;
1951 }
1952 
1953 /* Set up insn register uses for INSN and dependency context DEPS.  */
1954 static void
1955 setup_insn_reg_uses (struct deps_desc *deps, rtx_insn *insn)
1956 {
1957   unsigned i;
1958   reg_set_iterator rsi;
1959   struct reg_use_data *use, *use2, *next;
1960   struct deps_reg *reg_last;
1961 
1962   EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1963     {
1964       if (i < FIRST_PSEUDO_REGISTER
1965 	  && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1966 	continue;
1967 
1968       if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1969 	  && ! REGNO_REG_SET_P (reg_pending_sets, i)
1970 	  && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1971 	/* Ignore a use which is not dying.  */
1972 	continue;
1973 
1974       use = create_insn_reg_use (i, insn);
1975       use->next_regno_use = use;
1976       reg_last = &deps->reg_last[i];
1977 
1978       /* Create the cycle list of uses.  */
1979       for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
1980 	{
1981 	  use2 = create_insn_reg_use (i, list->insn ());
1982 	  next = use->next_regno_use;
1983 	  use->next_regno_use = use2;
1984 	  use2->next_regno_use = next;
1985 	}
1986     }
1987 }
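
/* For illustration (insn names hypothetical): if I3 uses pseudo R and
   reg_last->uses already records earlier uses in I1 and I2, the loop
   above links the three reg_use_data records into one circular list
   through next_regno_use, e.g.

       use(I3) -> use(I1) -> use(I2) -> use(I3) -> ...

   so that, starting from any record, every use of R can be visited.  */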
1988 
1989 /* Register pressure info for the currently processed insn.  */
1990 static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1991 
1992 /* Return TRUE if INSN has the use structure for REGNO.  */
1993 static bool
1994 insn_use_p (rtx insn, int regno)
1995 {
1996   struct reg_use_data *use;
1997 
1998   for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1999     if (use->regno == regno)
2000       return true;
2001   return false;
2002 }
2003 
2004 /* Update the register pressure info after birth of pseudo register REGNO
2005    in INSN.  Arguments CLOBBER_P and UNUSED_P say respectively whether
2006    the register is clobbered or unused after the insn.  */
2007 static void
2008 mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
2009 {
2010   int incr, new_incr;
2011   enum reg_class cl;
2012 
2013   gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2014   cl = sched_regno_pressure_class[regno];
2015   if (cl != NO_REGS)
2016     {
2017       incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2018       if (clobber_p)
2019 	{
2020 	  new_incr = reg_pressure_info[cl].clobber_increase + incr;
2021 	  reg_pressure_info[cl].clobber_increase = new_incr;
2022 	}
2023       else if (unused_p)
2024 	{
2025 	  new_incr = reg_pressure_info[cl].unused_set_increase + incr;
2026 	  reg_pressure_info[cl].unused_set_increase = new_incr;
2027 	}
2028       else
2029 	{
2030 	  new_incr = reg_pressure_info[cl].set_increase + incr;
2031 	  reg_pressure_info[cl].set_increase = new_incr;
2032 	  if (! insn_use_p (insn, regno))
2033 	    reg_pressure_info[cl].change += incr;
2034 	  create_insn_reg_set (regno, insn);
2035 	}
2036       gcc_assert (new_incr < (1 << INCREASE_BITS));
2037     }
2038 }
2039 
2040 /* Like mark_insn_pseudo_birth except that NREGS says how many hard
2041    registers are involved in the birth.  */
2042 static void
2043 mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
2044 			    bool clobber_p, bool unused_p)
2045 {
2046   enum reg_class cl;
2047   int new_incr, last = regno + nregs;
2048 
2049   while (regno < last)
2050     {
2051       gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2052       if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2053 	{
2054 	  cl = sched_regno_pressure_class[regno];
2055 	  if (cl != NO_REGS)
2056 	    {
2057 	      if (clobber_p)
2058 		{
2059 		  new_incr = reg_pressure_info[cl].clobber_increase + 1;
2060 		  reg_pressure_info[cl].clobber_increase = new_incr;
2061 		}
2062 	      else if (unused_p)
2063 		{
2064 		  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
2065 		  reg_pressure_info[cl].unused_set_increase = new_incr;
2066 		}
2067 	      else
2068 		{
2069 		  new_incr = reg_pressure_info[cl].set_increase + 1;
2070 		  reg_pressure_info[cl].set_increase = new_incr;
2071 		  if (! insn_use_p (insn, regno))
2072 		    reg_pressure_info[cl].change += 1;
2073 		  create_insn_reg_set (regno, insn);
2074 		}
2075 	      gcc_assert (new_incr < (1 << INCREASE_BITS));
2076 	    }
2077 	}
2078       regno++;
2079     }
2080 }
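
/* A worked example under a hypothetical target: a SET of (reg:DI 0)
   whose DImode value occupies hard registers 0 and 1 reaches this
   function with REGNO 0 and NREGS 2, and each allocatable register
   bumps the set_increase of its pressure class by one.  */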
2081 
2082 /* Update the register pressure info after birth of pseudo or hard
2083    register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say
2084    respectively whether the register is clobbered or unused after
2085    the insn.  */
2086 static void
2087 mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2088 {
2089   int regno;
2090 
2091   if (GET_CODE (reg) == SUBREG)
2092     reg = SUBREG_REG (reg);
2093 
2094   if (! REG_P (reg))
2095     return;
2096 
2097   regno = REGNO (reg);
2098   if (regno < FIRST_PSEUDO_REGISTER)
2099     mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg),
2100 				clobber_p, unused_p);
2101   else
2102     mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2103 }
2104 
2105 /* Update the register pressure info after death of pseudo register
2106    REGNO.  */
2107 static void
2108 mark_pseudo_death (int regno)
2109 {
2110   int incr;
2111   enum reg_class cl;
2112 
2113   gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2114   cl = sched_regno_pressure_class[regno];
2115   if (cl != NO_REGS)
2116     {
2117       incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2118       reg_pressure_info[cl].change -= incr;
2119     }
2120 }
2121 
2122 /* Like mark_pseudo_death except that NREGS says how many hard
2123    registers are involved in the death.  */
2124 static void
2125 mark_hard_regno_death (int regno, int nregs)
2126 {
2127   enum reg_class cl;
2128   int last = regno + nregs;
2129 
2130   while (regno < last)
2131     {
2132       gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2133       if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2134 	{
2135 	  cl = sched_regno_pressure_class[regno];
2136 	  if (cl != NO_REGS)
2137 	    reg_pressure_info[cl].change -= 1;
2138 	}
2139       regno++;
2140     }
2141 }
2142 
2143 /* Update the register pressure info after death of pseudo or hard
2144    register REG.  */
2145 static void
2146 mark_reg_death (rtx reg)
2147 {
2148   int regno;
2149 
2150   if (GET_CODE (reg) == SUBREG)
2151     reg = SUBREG_REG (reg);
2152 
2153   if (! REG_P (reg))
2154     return;
2155 
2156   regno = REGNO (reg);
2157   if (regno < FIRST_PSEUDO_REGISTER)
2158     mark_hard_regno_death (regno, REG_NREGS (reg));
2159   else
2160     mark_pseudo_death (regno);
2161 }
2162 
2163 /* Process SETTER of REG.  DATA is an insn containing the setter.  */
2164 static void
2165 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2166 {
2167   if (setter != NULL_RTX && GET_CODE (setter) != SET)
2168     return;
2169   mark_insn_reg_birth
2170     ((rtx) data, reg, false,
2171      find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2172 }
2173 
2174 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
2175 static void
2176 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2177 {
2178   if (GET_CODE (setter) == CLOBBER)
2179     mark_insn_reg_birth ((rtx) data, reg, true, false);
2180 }
2181 
2182 /* Set up reg pressure info related to INSN.  */
2183 void
2184 init_insn_reg_pressure_info (rtx_insn *insn)
2185 {
2186   int i, len;
2187   enum reg_class cl;
2188   static struct reg_pressure_data *pressure_info;
2189   rtx link;
2190 
2191   gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
2192 
2193   if (! INSN_P (insn))
2194     return;
2195 
2196   for (i = 0; i < ira_pressure_classes_num; i++)
2197     {
2198       cl = ira_pressure_classes[i];
2199       reg_pressure_info[cl].clobber_increase = 0;
2200       reg_pressure_info[cl].set_increase = 0;
2201       reg_pressure_info[cl].unused_set_increase = 0;
2202       reg_pressure_info[cl].change = 0;
2203     }
2204 
2205   note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2206 
2207   note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2208 
2209   if (AUTO_INC_DEC)
2210     for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2211       if (REG_NOTE_KIND (link) == REG_INC)
2212 	mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2213 
2214   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2215     if (REG_NOTE_KIND (link) == REG_DEAD)
2216       mark_reg_death (XEXP (link, 0));
2217 
2218   len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
2219   pressure_info
2220     = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2221   if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2222     INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2223 						    * sizeof (int), 1);
2224   for (i = 0; i < ira_pressure_classes_num; i++)
2225     {
2226       cl = ira_pressure_classes[i];
2227       pressure_info[i].clobber_increase
2228 	= reg_pressure_info[cl].clobber_increase;
2229       pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2230       pressure_info[i].unused_set_increase
2231 	= reg_pressure_info[cl].unused_set_increase;
2232       pressure_info[i].change = reg_pressure_info[cl].change;
2233     }
2234 }
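
/* A worked example with hypothetical pseudos: for an insn
   (set (reg:SI P1) (reg:SI P2)) where P2 carries a REG_DEAD note and
   both pseudos live in a one-register pressure class, the birth of P1
   adds 1 to set_increase and to change, and the death of P2 subtracts
   1 from change, so the insn's net pressure change for that class
   is 0.  */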
2235 
2236 
2237 
2238 
2239 /* Internal variable for the sched_analyze_[12] () functions.
2240    If it is nonzero, sched_analyze_[12] is looking at the outermost
2241    (toplevel) SET.  */
2242 static bool can_start_lhs_rhs_p;
2243 
2244 /* Extend reg info for the deps context DEPS given that
2245    we have just generated a register numbered REGNO.  */
2246 static void
2247 extend_deps_reg_info (struct deps_desc *deps, int regno)
2248 {
2249   int max_regno = regno + 1;
2250 
2251   gcc_assert (!reload_completed);
2252 
2253   /* In a readonly context, it would not hurt to extend info,
2254      but it should not be needed.  */
2255   if (reload_completed && deps->readonly)
2256     {
2257       deps->max_reg = max_regno;
2258       return;
2259     }
2260 
2261   if (max_regno > deps->max_reg)
2262     {
2263       deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2264                                    max_regno);
2265       memset (&deps->reg_last[deps->max_reg],
2266               0, (max_regno - deps->max_reg)
2267               * sizeof (struct deps_reg));
2268       deps->max_reg = max_regno;
2269     }
2270 }
2271 
2272 /* Extends REG_INFO_P if needed.  */
2273 void
2274 maybe_extend_reg_info_p (void)
2275 {
2276   /* Extend REG_INFO_P, if needed.  */
2277   if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2278     {
2279       size_t new_reg_info_p_size = max_regno + 128;
2280 
2281       gcc_assert (!reload_completed && sel_sched_p ());
2282 
2283       reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2284                                                     new_reg_info_p_size,
2285                                                     reg_info_p_size,
2286                                                     sizeof (*reg_info_p));
2287       reg_info_p_size = new_reg_info_p_size;
2288     }
2289 }
2290 
2291 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2292    The type of the reference is specified by REF and can be SET,
2293    CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */
2294 
2295 static void
2296 sched_analyze_reg (struct deps_desc *deps, int regno, machine_mode mode,
2297 		   enum rtx_code ref, rtx_insn *insn)
2298 {
2299   /* We could emit new pseudos in renaming.  Extend the reg structures.  */
2300   if (!reload_completed && sel_sched_p ()
2301       && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2302     extend_deps_reg_info (deps, regno);
2303 
2304   maybe_extend_reg_info_p ();
2305 
2306   /* A hard reg in a wide mode may really be multiple registers.
2307      If so, mark all of them just like the first.  */
2308   if (regno < FIRST_PSEUDO_REGISTER)
2309     {
2310       int i = hard_regno_nregs[regno][mode];
2311       if (ref == SET)
2312 	{
2313 	  while (--i >= 0)
2314 	    note_reg_set (regno + i);
2315 	}
2316       else if (ref == USE)
2317 	{
2318 	  while (--i >= 0)
2319 	    note_reg_use (regno + i);
2320 	}
2321       else
2322 	{
2323 	  while (--i >= 0)
2324 	    note_reg_clobber (regno + i);
2325 	}
2326     }
2327 
2328   /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2329      it does not reload.  Ignore these as they have served their
2330      purpose already.  */
2331   else if (regno >= deps->max_reg)
2332     {
2333       enum rtx_code code = GET_CODE (PATTERN (insn));
2334       gcc_assert (code == USE || code == CLOBBER);
2335     }
2336 
2337   else
2338     {
2339       if (ref == SET)
2340 	note_reg_set (regno);
2341       else if (ref == USE)
2342 	note_reg_use (regno);
2343       else
2344 	note_reg_clobber (regno);
2345 
2346       /* Pseudos that are REG_EQUIV to something may be replaced
2347 	 by that during reloading.  We need only add dependencies for
2348 	the address in the REG_EQUIV note.  */
2349       if (!reload_completed && get_reg_known_equiv_p (regno))
2350 	{
2351 	  rtx t = get_reg_known_value (regno);
2352 	  if (MEM_P (t))
2353 	    sched_analyze_2 (deps, XEXP (t, 0), insn);
2354 	}
2355 
2356       /* Don't let it cross a call after scheduling if it doesn't
2357 	 already cross one.  */
2358       if (REG_N_CALLS_CROSSED (regno) == 0)
2359 	{
2360 	  if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2361 	    deps->sched_before_next_call
2362 	      = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2363 	  else
2364 	    add_dependence_list (insn, deps->last_function_call, 1,
2365 				 REG_DEP_ANTI, false);
2366 	}
2367     }
2368 }
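
/* For example (hypothetical pseudo P): if P carries a REG_EQUIV note
   equal to (mem:SI (plus:SI (reg:SI sp) (const_int 16))), reload may
   later replace P by that memory, so the code above analyzes the
   address (plus (reg sp) (const_int 16)) as a read to record the
   dependencies the substituted MEM would need.  */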
2369 
2370 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2371    rtx, X, creating all dependencies generated by the write to the
2372    destination of X, and reads of everything mentioned.  */
2373 
2374 static void
2375 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2376 {
2377   rtx dest = XEXP (x, 0);
2378   enum rtx_code code = GET_CODE (x);
2379   bool cslr_p = can_start_lhs_rhs_p;
2380 
2381   can_start_lhs_rhs_p = false;
2382 
2383   gcc_assert (dest);
2384   if (dest == 0)
2385     return;
2386 
2387   if (cslr_p && sched_deps_info->start_lhs)
2388     sched_deps_info->start_lhs (dest);
2389 
2390   if (GET_CODE (dest) == PARALLEL)
2391     {
2392       int i;
2393 
2394       for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2395 	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2396 	  sched_analyze_1 (deps,
2397 			   gen_rtx_CLOBBER (VOIDmode,
2398 					    XEXP (XVECEXP (dest, 0, i), 0)),
2399 			   insn);
2400 
2401       if (cslr_p && sched_deps_info->finish_lhs)
2402 	sched_deps_info->finish_lhs ();
2403 
2404       if (code == SET)
2405 	{
2406 	  can_start_lhs_rhs_p = cslr_p;
2407 
2408 	  sched_analyze_2 (deps, SET_SRC (x), insn);
2409 
2410 	  can_start_lhs_rhs_p = false;
2411 	}
2412 
2413       return;
2414     }
2415 
2416   while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2417 	 || GET_CODE (dest) == ZERO_EXTRACT)
2418     {
2419       if (GET_CODE (dest) == STRICT_LOW_PART
2420 	 || GET_CODE (dest) == ZERO_EXTRACT
2421 	 || df_read_modify_subreg_p (dest))
2422         {
2423 	  /* These both read and modify the result.  We must handle
2424              them as writes to get proper dependencies for following
2425              instructions.  We must handle them as reads to get proper
2426              dependencies from this to previous instructions.
2427              Thus we need to call sched_analyze_2.  */
2428 
2429 	  sched_analyze_2 (deps, XEXP (dest, 0), insn);
2430 	}
2431       if (GET_CODE (dest) == ZERO_EXTRACT)
2432 	{
2433 	  /* The second and third arguments are values read by this insn.  */
2434 	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
2435 	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
2436 	}
2437       dest = XEXP (dest, 0);
2438     }
2439 
2440   if (REG_P (dest))
2441     {
2442       int regno = REGNO (dest);
2443       machine_mode mode = GET_MODE (dest);
2444 
2445       sched_analyze_reg (deps, regno, mode, code, insn);
2446 
2447 #ifdef STACK_REGS
2448       /* Treat all writes to a stack register as modifying the TOS.  */
2449       if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2450 	{
2451 	  /* Avoid analyzing the same register twice.  */
2452 	  if (regno != FIRST_STACK_REG)
2453 	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2454 
2455 	  add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2456 			       FIRST_STACK_REG);
2457 	}
2458 #endif
2459     }
2460   else if (MEM_P (dest))
2461     {
2462       /* Writing memory.  */
2463       rtx t = dest;
2464 
2465       if (sched_deps_info->use_cselib)
2466 	{
2467 	  machine_mode address_mode = get_address_mode (dest);
2468 
2469 	  t = shallow_copy_rtx (dest);
2470 	  cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2471 				   GET_MODE (t), insn);
2472 	  XEXP (t, 0)
2473 	    = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2474 						insn);
2475 	}
2476       t = canon_rtx (t);
2477 
2478       /* Pending lists can't get larger with a readonly context.  */
2479       if (!deps->readonly
2480           && ((deps->pending_read_list_length + deps->pending_write_list_length)
2481               >= MAX_PENDING_LIST_LENGTH))
2482 	{
2483 	  /* Flush all pending reads and writes to prevent the pending lists
2484 	     from getting any larger.  Insn scheduling runs too slowly when
2485 	     these lists get long.  When compiling GCC with itself,
2486 	     this flush occurs 8 times for sparc, and 10 times for m88k using
2487 	     the default value of 32.  */
2488 	  flush_pending_lists (deps, insn, false, true);
2489 	}
2490       else
2491 	{
2492 	  rtx_insn_list *pending;
2493 	  rtx_expr_list *pending_mem;
2494 
2495 	  pending = deps->pending_read_insns;
2496 	  pending_mem = deps->pending_read_mems;
2497 	  while (pending)
2498 	    {
2499 	      if (anti_dependence (pending_mem->element (), t)
2500 		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2501 		note_mem_dep (t, pending_mem->element (), pending->insn (),
2502 			      DEP_ANTI);
2503 
2504 	      pending = pending->next ();
2505 	      pending_mem = pending_mem->next ();
2506 	    }
2507 
2508 	  pending = deps->pending_write_insns;
2509 	  pending_mem = deps->pending_write_mems;
2510 	  while (pending)
2511 	    {
2512 	      if (output_dependence (pending_mem->element (), t)
2513 		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2514 		note_mem_dep (t, pending_mem->element (),
2515 			      pending->insn (),
2516 			      DEP_OUTPUT);
2517 
2518 	      pending = pending->next ();
2519 	      pending_mem = pending_mem->next ();
2520 	    }
2521 
2522 	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2523 			       REG_DEP_ANTI, true);
2524 	  add_dependence_list (insn, deps->pending_jump_insns, 1,
2525 			       REG_DEP_CONTROL, true);
2526 
2527           if (!deps->readonly)
2528             add_insn_mem_dependence (deps, false, insn, dest);
2529 	}
2530       sched_analyze_2 (deps, XEXP (dest, 0), insn);
2531     }
2532 
2533   if (cslr_p && sched_deps_info->finish_lhs)
2534     sched_deps_info->finish_lhs ();
2535 
2536   /* Analyze reads.  */
2537   if (GET_CODE (x) == SET)
2538     {
2539       can_start_lhs_rhs_p = cslr_p;
2540 
2541       sched_analyze_2 (deps, SET_SRC (x), insn);
2542 
2543       can_start_lhs_rhs_p = false;
2544     }
2545 }
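
/* For example (schematic): for an INSN (set (mem:SI (reg:SI A)) (reg:SI V))
   the code above records anti dependencies against pending reads that may
   alias the store, output dependencies against pending aliasing writes,
   appends the store to the pending write list, and finally analyzes the
   address register A and the source V as reads via sched_analyze_2.  */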
2546 
2547 /* Analyze the uses of memory and registers in rtx X in INSN.  */
2548 static void
2549 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2550 {
2551   int i;
2552   int j;
2553   enum rtx_code code;
2554   const char *fmt;
2555   bool cslr_p = can_start_lhs_rhs_p;
2556 
2557   can_start_lhs_rhs_p = false;
2558 
2559   gcc_assert (x);
2560   if (x == 0)
2561     return;
2562 
2563   if (cslr_p && sched_deps_info->start_rhs)
2564     sched_deps_info->start_rhs (x);
2565 
2566   code = GET_CODE (x);
2567 
2568   switch (code)
2569     {
2570     CASE_CONST_ANY:
2571     case SYMBOL_REF:
2572     case CONST:
2573     case LABEL_REF:
2574       /* Ignore constants.  */
2575       if (cslr_p && sched_deps_info->finish_rhs)
2576 	sched_deps_info->finish_rhs ();
2577 
2578       return;
2579 
2580     case CC0:
2581       if (!HAVE_cc0)
2582 	gcc_unreachable ();
2583 
2584       /* User of CC0 depends on immediately preceding insn.  */
2585       SCHED_GROUP_P (insn) = 1;
2586        /* Don't move CC0 setter to another block (it can set up the
2587         same flag for previous CC0 users which is safe).  */
2588       CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2589 
2590       if (cslr_p && sched_deps_info->finish_rhs)
2591 	sched_deps_info->finish_rhs ();
2592 
2593       return;
2594 
2595     case REG:
2596       {
2597 	int regno = REGNO (x);
2598 	machine_mode mode = GET_MODE (x);
2599 
2600 	sched_analyze_reg (deps, regno, mode, USE, insn);
2601 
2602 #ifdef STACK_REGS
2603       /* Treat all reads of a stack register as modifying the TOS.  */
2604       if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2605 	{
2606 	  /* Avoid analyzing the same register twice.  */
2607 	  if (regno != FIRST_STACK_REG)
2608 	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2609 	  sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2610 	}
2611 #endif
2612 
2613 	if (cslr_p && sched_deps_info->finish_rhs)
2614 	  sched_deps_info->finish_rhs ();
2615 
2616 	return;
2617       }
2618 
2619     case MEM:
2620       {
2621 	/* Reading memory.  */
2622 	rtx_insn_list *u;
2623 	rtx_insn_list *pending;
2624 	rtx_expr_list *pending_mem;
2625 	rtx t = x;
2626 
2627 	if (sched_deps_info->use_cselib)
2628 	  {
2629 	    machine_mode address_mode = get_address_mode (t);
2630 
2631 	    t = shallow_copy_rtx (t);
2632 	    cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2633 				     GET_MODE (t), insn);
2634 	    XEXP (t, 0)
2635 	      = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2636 						  insn);
2637 	  }
2638 
2639 	if (!DEBUG_INSN_P (insn))
2640 	  {
2641 	    t = canon_rtx (t);
2642 	    pending = deps->pending_read_insns;
2643 	    pending_mem = deps->pending_read_mems;
2644 	    while (pending)
2645 	      {
2646 		if (read_dependence (pending_mem->element (), t)
2647 		    && ! sched_insns_conditions_mutex_p (insn,
2648 							 pending->insn ()))
2649 		  note_mem_dep (t, pending_mem->element (),
2650 				pending->insn (),
2651 				DEP_ANTI);
2652 
2653 		pending = pending->next ();
2654 		pending_mem = pending_mem->next ();
2655 	      }
2656 
2657 	    pending = deps->pending_write_insns;
2658 	    pending_mem = deps->pending_write_mems;
2659 	    while (pending)
2660 	      {
2661 		if (true_dependence (pending_mem->element (), VOIDmode, t)
2662 		    && ! sched_insns_conditions_mutex_p (insn,
2663 							 pending->insn ()))
2664 		  note_mem_dep (t, pending_mem->element (),
2665 				pending->insn (),
2666 				sched_deps_info->generate_spec_deps
2667 				? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2668 
2669 		pending = pending->next ();
2670 		pending_mem = pending_mem->next ();
2671 	      }
2672 
2673 	    for (u = deps->last_pending_memory_flush; u; u = u->next ())
2674 	      add_dependence (insn, u->insn (), REG_DEP_ANTI);
2675 
2676 	    for (u = deps->pending_jump_insns; u; u = u->next ())
2677 	      if (deps_may_trap_p (x))
2678 		{
2679 		  if ((sched_deps_info->generate_spec_deps)
2680 		      && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2681 		    {
2682 		      ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2683 					      MAX_DEP_WEAK);
2684 
2685 		      note_dep (u->insn (), ds);
2686 		    }
2687 		  else
2688 		    add_dependence (insn, u->insn (), REG_DEP_CONTROL);
2689 		}
2690 	  }
2691 
2692 	/* Always add these dependencies to pending_reads, since
2693 	   this insn may be followed by a write.  */
2694 	if (!deps->readonly)
2695 	  {
2696 	    if ((deps->pending_read_list_length
2697 		 + deps->pending_write_list_length)
2698 		>= MAX_PENDING_LIST_LENGTH
2699 		&& !DEBUG_INSN_P (insn))
2700 	      flush_pending_lists (deps, insn, true, true);
2701 	    add_insn_mem_dependence (deps, true, insn, x);
2702 	  }
2703 
2704 	sched_analyze_2 (deps, XEXP (x, 0), insn);
2705 
2706 	if (cslr_p && sched_deps_info->finish_rhs)
2707 	  sched_deps_info->finish_rhs ();
2708 
2709 	return;
2710       }
2711 
2712     /* Force pending stores to memory in case a trap handler needs them.
2713        Also force pending loads from memory; loads and stores can segfault
2714        and the signal handler won't be triggered if the trap insn was moved
2715        above a load or store insn.  */
2716     case TRAP_IF:
2717       flush_pending_lists (deps, insn, true, true);
2718       break;
2719 
2720     case PREFETCH:
2721       if (PREFETCH_SCHEDULE_BARRIER_P (x))
2722 	reg_pending_barrier = TRUE_BARRIER;
2723       /* A prefetch insn contains addresses only.  So if the prefetch
2724 	 address has no registers, there will be no dependencies on
2725 	 the prefetch insn.  This is wrong from the correctness point
2726 	 of view: such a prefetch can be moved below a jump insn,
2727 	 which usually generates a MOVE_BARRIER that prevents insns
2728 	 containing registers or memories from moving through the
2729 	 barrier.  It is also wrong from the performance point of
2730 	 view, as a prefetch without dependencies will tend to be
2731 	 issued later instead of earlier.  It is hard to generate
2732 	 accurate dependencies for prefetch insns, as a prefetch has
2733 	 only the start address, but it is better to have something
2734 	 than nothing.  */
2735       if (!deps->readonly)
2736 	{
2737 	  rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2738 	  if (sched_deps_info->use_cselib)
2739 	    cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2740 	  add_insn_mem_dependence (deps, true, insn, x);
2741 	}
2742       break;
2743 
2744     case UNSPEC_VOLATILE:
2745       flush_pending_lists (deps, insn, true, true);
2746       /* FALLTHRU */
2747 
2748     case ASM_OPERANDS:
2749     case ASM_INPUT:
2750       {
2751 	/* Traditional and volatile asm instructions must be considered to use
2752 	   and clobber all hard registers, all pseudo-registers and all of
2753 	   memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.
2754 
2755 	   Consider for instance a volatile asm that changes the fpu rounding
2756 	   mode.  An insn should not be moved across this even if it only uses
2757 	   pseudo-regs because it might give an incorrectly rounded result.  */
2758 	if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2759 	    && !DEBUG_INSN_P (insn))
2760 	  reg_pending_barrier = TRUE_BARRIER;
2761 
2762 	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
2763 	   We cannot just fall through here, since we would then be confused
2764 	   by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2765 	   a traditional asm unlike its normal usage.  */
2766 
2767 	if (code == ASM_OPERANDS)
2768 	  {
2769 	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2770 	      sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2771 
2772 	    if (cslr_p && sched_deps_info->finish_rhs)
2773 	      sched_deps_info->finish_rhs ();
2774 
2775 	    return;
2776 	  }
2777 	break;
2778       }
2779 
2780     case PRE_DEC:
2781     case POST_DEC:
2782     case PRE_INC:
2783     case POST_INC:
2784       /* These both read and modify the result.  We must handle them as writes
2785          to get proper dependencies for following instructions.  We must handle
2786          them as reads to get proper dependencies from this to previous
2787          instructions.  Thus we need to pass them to both sched_analyze_1
2788          and sched_analyze_2.  We must call sched_analyze_2 first in order
2789          to get the proper antecedent for the read.  */
2790       sched_analyze_2 (deps, XEXP (x, 0), insn);
2791       sched_analyze_1 (deps, x, insn);
2792 
2793       if (cslr_p && sched_deps_info->finish_rhs)
2794 	sched_deps_info->finish_rhs ();
2795 
2796       return;
2797 
2798     case POST_MODIFY:
2799     case PRE_MODIFY:
2800       /* op0 = op0 + op1 */
2801       sched_analyze_2 (deps, XEXP (x, 0), insn);
2802       sched_analyze_2 (deps, XEXP (x, 1), insn);
2803       sched_analyze_1 (deps, x, insn);
2804 
2805       if (cslr_p && sched_deps_info->finish_rhs)
2806 	sched_deps_info->finish_rhs ();
2807 
2808       return;
2809 
2810     default:
2811       break;
2812     }
2813 
2814   /* Other cases: walk the insn.  */
2815   fmt = GET_RTX_FORMAT (code);
2816   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2817     {
2818       if (fmt[i] == 'e')
2819 	sched_analyze_2 (deps, XEXP (x, i), insn);
2820       else if (fmt[i] == 'E')
2821 	for (j = 0; j < XVECLEN (x, i); j++)
2822 	  sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2823     }
2824 
2825   if (cslr_p && sched_deps_info->finish_rhs)
2826     sched_deps_info->finish_rhs ();
2827 }
2828 
2829 /* Try to group two fusible insns together to prevent the scheduler
2830    from scheduling them apart.  */
2831 
2832 static void
2833 sched_macro_fuse_insns (rtx_insn *insn)
2834 {
2835   rtx_insn *prev;
2836 
2837   if (any_condjump_p (insn))
2838     {
2839       unsigned int condreg1, condreg2;
2840       rtx cc_reg_1;
2841       targetm.fixed_condition_code_regs (&condreg1, &condreg2);
2842       cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
2843       prev = prev_nonnote_nondebug_insn (insn);
2844       if (!reg_referenced_p (cc_reg_1, PATTERN (insn))
2845           || !prev
2846           || !modified_in_p (cc_reg_1, prev))
2847         return;
2848     }
2849   else
2850     {
2851       rtx insn_set = single_set (insn);
2852 
2853       prev = prev_nonnote_nondebug_insn (insn);
2854       if (!prev
2855           || !insn_set
2856           || !single_set (prev))
2857         return;
2858 
2859     }
2860 
2861   if (targetm.sched.macro_fusion_pair_p (prev, insn))
2862     SCHED_GROUP_P (insn) = 1;
2863 
2864 }
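
/* For example, on a hypothetical target whose macro_fusion_pair_p hook
   accepts compare-and-branch pairs, for the sequence

       (set (reg:CC flags) (compare:CC (reg:SI 1) (const_int 0)))
       (jump_insn ... (if_then_else (ne (reg:CC flags) (const_int 0)) ...))

   the jump gets SCHED_GROUP_P set, keeping it adjacent to the compare
   so the pair can issue as one fused macro-op.  */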
2865 
2866 /* Get the implicit reg pending clobbers for INSN and save them in TEMP.  */
2867 void
2868 get_implicit_reg_pending_clobbers (HARD_REG_SET *temp, rtx_insn *insn)
2869 {
2870   extract_insn (insn);
2871   preprocess_constraints (insn);
2872   alternative_mask preferred = get_preferred_alternatives (insn);
2873   ira_implicitly_set_insn_hard_regs (temp, preferred);
2874   AND_COMPL_HARD_REG_SET (*temp, ira_no_alloc_regs);
2875 }
2876 
2877 /* Analyze an INSN with pattern X to find all dependencies.  */
2878 static void
2879 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
2880 {
2881   RTX_CODE code = GET_CODE (x);
2882   rtx link;
2883   unsigned i;
2884   reg_set_iterator rsi;
2885 
2886   if (! reload_completed)
2887     {
2888       HARD_REG_SET temp;
2889       get_implicit_reg_pending_clobbers (&temp, insn);
2890       IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2891     }
2892 
2893   can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2894 			 && code == SET);
2895 
2896   /* Group compare and branch insns for macro-fusion.  */
2897   if (targetm.sched.macro_fusion_p
2898       && targetm.sched.macro_fusion_p ())
2899     sched_macro_fuse_insns (insn);
2900 
2901   if (may_trap_p (x))
2902     /* Avoid moving trapping instructions across function calls that might
2903        not always return.  */
2904     add_dependence_list (insn, deps->last_function_call_may_noreturn,
2905 			 1, REG_DEP_ANTI, true);
2906 
2907   /* We must avoid creating a situation in which two successors of the
2908      current block have different unwind info after scheduling.  If at any
2909      point the two paths re-join this leads to incorrect unwind info.  */
2910   /* ??? There are certain situations involving a forced frame pointer in
2911      which, with extra effort, we could fix up the unwind info at a later
2912      CFG join.  However, it seems better to notice these cases earlier
2913      during prologue generation and avoid marking the frame pointer setup
2914      as frame-related at all.  */
2915   if (RTX_FRAME_RELATED_P (insn))
2916     {
2917       /* Make sure prologue insn is scheduled before next jump.  */
2918       deps->sched_before_next_jump
2919 	= alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2920 
2921       /* Make sure epilogue insn is scheduled after preceding jumps.  */
2922       add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2923 			   true);
2924     }
2925 
2926   if (code == COND_EXEC)
2927     {
2928       sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2929 
2930       /* ??? Should be recording conditions so we reduce the number of
2931 	 false dependencies.  */
2932       x = COND_EXEC_CODE (x);
2933       code = GET_CODE (x);
2934     }
2935   if (code == SET || code == CLOBBER)
2936     {
2937       sched_analyze_1 (deps, x, insn);
2938 
2939       /* Bare clobber insns are used for letting life analysis, reg-stack
2940 	 and others know that a value is dead.  Depend on the last call
2941 	 instruction so that reg-stack won't get confused.  */
2942       if (code == CLOBBER)
2943 	add_dependence_list (insn, deps->last_function_call, 1,
2944 			     REG_DEP_OUTPUT, true);
2945     }
2946   else if (code == PARALLEL)
2947     {
2948       for (i = XVECLEN (x, 0); i--;)
2949 	{
2950 	  rtx sub = XVECEXP (x, 0, i);
2951 	  code = GET_CODE (sub);
2952 
2953 	  if (code == COND_EXEC)
2954 	    {
2955 	      sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2956 	      sub = COND_EXEC_CODE (sub);
2957 	      code = GET_CODE (sub);
2958 	    }
2959 	  if (code == SET || code == CLOBBER)
2960 	    sched_analyze_1 (deps, sub, insn);
2961 	  else
2962 	    sched_analyze_2 (deps, sub, insn);
2963 	}
2964     }
2965   else
2966     sched_analyze_2 (deps, x, insn);
2967 
2968   /* Mark registers CLOBBERED or used by the called function.  */
2969   if (CALL_P (insn))
2970     {
2971       for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2972 	{
2973 	  if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2974 	    sched_analyze_1 (deps, XEXP (link, 0), insn);
2975 	  else if (GET_CODE (XEXP (link, 0)) != SET)
2976 	    sched_analyze_2 (deps, XEXP (link, 0), insn);
2977 	}
2978       /* Don't schedule anything after a tail call; a tail call needs
2979 	 to use at least all call-saved registers.  */
2980       if (SIBLING_CALL_P (insn))
2981 	reg_pending_barrier = TRUE_BARRIER;
2982       else if (find_reg_note (insn, REG_SETJMP, NULL))
2983 	reg_pending_barrier = MOVE_BARRIER;
2984     }
2985 
2986   if (JUMP_P (insn))
2987     {
2988       rtx_insn *next = next_nonnote_nondebug_insn (insn);
2989       if (next && BARRIER_P (next))
2990 	reg_pending_barrier = MOVE_BARRIER;
2991       else
2992 	{
2993 	  rtx_insn_list *pending;
2994 	  rtx_expr_list *pending_mem;
2995 
2996           if (sched_deps_info->compute_jump_reg_dependencies)
2997             {
2998               (*sched_deps_info->compute_jump_reg_dependencies)
2999 		(insn, reg_pending_control_uses);
3000 
3001               /* Make latency of jump equal to 0 by using anti-dependence.  */
3002               EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3003                 {
3004                   struct deps_reg *reg_last = &deps->reg_last[i];
3005                   add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
3006 				       false);
3007                   add_dependence_list (insn, reg_last->implicit_sets,
3008 				       0, REG_DEP_ANTI, false);
3009                   add_dependence_list (insn, reg_last->clobbers, 0,
3010 				       REG_DEP_ANTI, false);
3011                 }
3012             }
3013 
3014 	  /* All memory writes and volatile reads must happen before the
3015 	     jump.  Non-volatile reads must happen before the jump iff
3016 	     their result is needed by the register use mask computed above.  */
3017 
3018 	  pending = deps->pending_write_insns;
3019 	  pending_mem = deps->pending_write_mems;
3020 	  while (pending)
3021 	    {
3022 	      if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3023 		add_dependence (insn, pending->insn (),
3024 				REG_DEP_OUTPUT);
3025 	      pending = pending->next ();
3026 	      pending_mem = pending_mem->next ();
3027 	    }
3028 
3029 	  pending = deps->pending_read_insns;
3030 	  pending_mem = deps->pending_read_mems;
3031 	  while (pending)
3032 	    {
3033 	      if (MEM_VOLATILE_P (pending_mem->element ())
3034 		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3035 		add_dependence (insn, pending->insn (),
3036 				REG_DEP_OUTPUT);
3037 	      pending = pending->next ();
3038 	      pending_mem = pending_mem->next ();
3039 	    }
3040 
3041 	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
3042 			       REG_DEP_ANTI, true);
3043 	  add_dependence_list (insn, deps->pending_jump_insns, 1,
3044 			       REG_DEP_ANTI, true);
3045 	}
3046     }
3047 
3048   /* If this instruction can throw an exception, then moving it changes
3049      where block boundaries fall.  This is mighty confusing elsewhere.
3050      Therefore, prevent such an instruction from being moved.  Same for
3051      non-jump instructions that define block boundaries.
3052      ??? Unclear whether this is still necessary in EBB mode.  If not,
3053      add_branch_dependences should be adjusted for RGN mode instead.  */
3054   if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
3055       || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
3056     reg_pending_barrier = MOVE_BARRIER;
3057 
3058   if (sched_pressure != SCHED_PRESSURE_NONE)
3059     {
3060       setup_insn_reg_uses (deps, insn);
3061       init_insn_reg_pressure_info (insn);
3062     }
3063 
3064   /* Add register dependencies for insn.  */
3065   if (DEBUG_INSN_P (insn))
3066     {
3067       rtx_insn *prev = deps->last_debug_insn;
3068       rtx_insn_list *u;
3069 
3070       if (!deps->readonly)
3071 	deps->last_debug_insn = insn;
3072 
3073       if (prev)
3074 	add_dependence (insn, prev, REG_DEP_ANTI);
3075 
3076       add_dependence_list (insn, deps->last_function_call, 1,
3077 			   REG_DEP_ANTI, false);
3078 
3079       if (!sel_sched_p ())
3080 	for (u = deps->last_pending_memory_flush; u; u = u->next ())
3081 	  add_dependence (insn, u->insn (), REG_DEP_ANTI);
3082 
3083       EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3084 	{
3085 	  struct deps_reg *reg_last = &deps->reg_last[i];
3086 	  add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
3087 	  /* There's no point in making REG_DEP_CONTROL dependencies for
3088 	     debug insns.  */
3089 	  add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3090 			       false);
3091 
3092 	  if (!deps->readonly)
3093 	    reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3094 	}
3095       CLEAR_REG_SET (reg_pending_uses);
3096 
3097       /* Quite often, a debug insn will refer to stuff in the
3098 	 previous instruction, but the reason we want this
3099 	 dependency here is to make sure the scheduler doesn't
3100 	 gratuitously move a debug insn ahead.  This could dirty
3101 	 DF flags and cause additional analysis that wouldn't have
3102 	 occurred in compilation without debug insns, and such
3103 	 additional analysis can modify the generated code.  */
3104       prev = PREV_INSN (insn);
3105 
3106       if (prev && NONDEBUG_INSN_P (prev))
3107 	add_dependence (insn, prev, REG_DEP_ANTI);
3108     }
3109   else
3110     {
3111       regset_head set_or_clobbered;
3112 
3113       EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3114 	{
3115 	  struct deps_reg *reg_last = &deps->reg_last[i];
3116 	  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3117 	  add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3118 			       false);
3119 	  add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3120 			       false);
3121 
3122 	  if (!deps->readonly)
3123 	    {
3124 	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3125 	      reg_last->uses_length++;
3126 	    }
3127 	}
3128 
3129       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3130 	if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3131 	  {
3132 	    struct deps_reg *reg_last = &deps->reg_last[i];
3133 	    add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3134 	    add_dependence_list (insn, reg_last->implicit_sets, 0,
3135 				 REG_DEP_ANTI, false);
3136 	    add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3137 				 false);
3138 
3139 	    if (!deps->readonly)
3140 	      {
3141 		reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3142 		reg_last->uses_length++;
3143 	      }
3144 	  }
3145 
3146       if (targetm.sched.exposed_pipeline)
3147 	{
3148 	  INIT_REG_SET (&set_or_clobbered);
3149 	  bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3150 		      reg_pending_sets);
3151 	  EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3152 	    {
3153 	      struct deps_reg *reg_last = &deps->reg_last[i];
3154 	      rtx list;
3155 	      for (list = reg_last->uses; list; list = XEXP (list, 1))
3156 		{
3157 		  rtx other = XEXP (list, 0);
3158 		  if (INSN_CACHED_COND (other) != const_true_rtx
3159 		      && refers_to_regno_p (i, INSN_CACHED_COND (other)))
3160 		    INSN_CACHED_COND (other) = const_true_rtx;
3161 		}
3162 	    }
3163 	}
3164 
3165       /* If the current insn is conditional, we can't free any
3166 	 of the lists.  */
3167       if (sched_has_condition_p (insn))
3168 	{
3169 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3170 	    {
3171 	      struct deps_reg *reg_last = &deps->reg_last[i];
3172 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3173 				   false);
3174 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
3175 				   REG_DEP_ANTI, false);
3176 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3177 				   false);
3178 	      add_dependence_list (insn, reg_last->control_uses, 0,
3179 				   REG_DEP_CONTROL, false);
3180 
3181 	      if (!deps->readonly)
3182 		{
3183 		  reg_last->clobbers
3184 		    = alloc_INSN_LIST (insn, reg_last->clobbers);
3185 		  reg_last->clobbers_length++;
3186 		}
3187 	    }
3188 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3189 	    {
3190 	      struct deps_reg *reg_last = &deps->reg_last[i];
3191 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3192 				   false);
3193 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
3194 				   REG_DEP_ANTI, false);
3195 	      add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3196 				   false);
3197 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3198 				   false);
3199 	      add_dependence_list (insn, reg_last->control_uses, 0,
3200 				   REG_DEP_CONTROL, false);
3201 
3202 	      if (!deps->readonly)
3203 		reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3204 	    }
3205 	}
3206       else
3207 	{
3208 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3209 	    {
3210 	      struct deps_reg *reg_last = &deps->reg_last[i];
3211 	      if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
3212 		  || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
3213 		{
3214 		  add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3215 						REG_DEP_OUTPUT, false);
3216 		  add_dependence_list_and_free (deps, insn,
3217 						&reg_last->implicit_sets, 0,
3218 						REG_DEP_ANTI, false);
3219 		  add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3220 						REG_DEP_ANTI, false);
3221 		  add_dependence_list_and_free (deps, insn,
3222 						&reg_last->control_uses, 0,
3223 						REG_DEP_ANTI, false);
3224 		  add_dependence_list_and_free (deps, insn,
3225 						&reg_last->clobbers, 0,
3226 						REG_DEP_OUTPUT, false);
3227 
3228 		  if (!deps->readonly)
3229 		    {
3230 		      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3231 		      reg_last->clobbers_length = 0;
3232 		      reg_last->uses_length = 0;
3233 		    }
3234 		}
3235 	      else
3236 		{
3237 		  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3238 				       false);
3239 		  add_dependence_list (insn, reg_last->implicit_sets, 0,
3240 				       REG_DEP_ANTI, false);
3241 		  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3242 				       false);
3243 		  add_dependence_list (insn, reg_last->control_uses, 0,
3244 				       REG_DEP_CONTROL, false);
3245 		}
3246 
3247 	      if (!deps->readonly)
3248 		{
3249 		  reg_last->clobbers_length++;
3250 		  reg_last->clobbers
3251 		    = alloc_INSN_LIST (insn, reg_last->clobbers);
3252 		}
3253 	    }
3254 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3255 	    {
3256 	      struct deps_reg *reg_last = &deps->reg_last[i];
3257 
3258 	      add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3259 					    REG_DEP_OUTPUT, false);
3260 	      add_dependence_list_and_free (deps, insn,
3261 					    &reg_last->implicit_sets,
3262 					    0, REG_DEP_ANTI, false);
3263 	      add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3264 					    REG_DEP_OUTPUT, false);
3265 	      add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3266 					    REG_DEP_ANTI, false);
3267 	      add_dependence_list (insn, reg_last->control_uses, 0,
3268 				   REG_DEP_CONTROL, false);
3269 
3270 	      if (!deps->readonly)
3271 		{
3272 		  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3273 		  reg_last->uses_length = 0;
3274 		  reg_last->clobbers_length = 0;
3275 		}
3276 	    }
3277 	}
3278       if (!deps->readonly)
3279 	{
3280 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3281 	    {
3282 	      struct deps_reg *reg_last = &deps->reg_last[i];
3283 	      reg_last->control_uses
3284 		= alloc_INSN_LIST (insn, reg_last->control_uses);
3285 	    }
3286 	}
3287     }
3288 
3289   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3290     if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3291       {
3292 	struct deps_reg *reg_last = &deps->reg_last[i];
3293 	add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3294 	add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3295 	add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3296 	add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3297 			     false);
3298 
3299 	if (!deps->readonly)
3300 	  reg_last->implicit_sets
3301 	    = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3302       }
3303 
3304   if (!deps->readonly)
3305     {
3306       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3307       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3308       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3309       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3310 	if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3311 	    || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3312 	  SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3313 
3314       /* Set up the pending barrier found.  */
3315       deps->last_reg_pending_barrier = reg_pending_barrier;
3316     }
3317 
3318   CLEAR_REG_SET (reg_pending_uses);
3319   CLEAR_REG_SET (reg_pending_clobbers);
3320   CLEAR_REG_SET (reg_pending_sets);
3321   CLEAR_REG_SET (reg_pending_control_uses);
3322   CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3323   CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3324 
3325   /* Add dependencies if a scheduling barrier was found.  */
3326   if (reg_pending_barrier)
3327     {
3328       /* In the case of a barrier, most of the added dependencies are
3329          not real, so we use anti-dependence here.  */
3330       if (sched_has_condition_p (insn))
3331 	{
3332 	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3333 	    {
3334 	      struct deps_reg *reg_last = &deps->reg_last[i];
3335 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3336 				   true);
3337 	      add_dependence_list (insn, reg_last->sets, 0,
3338 				   reg_pending_barrier == TRUE_BARRIER
3339 				   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3340 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
3341 				   REG_DEP_ANTI, true);
3342 	      add_dependence_list (insn, reg_last->clobbers, 0,
3343 				   reg_pending_barrier == TRUE_BARRIER
3344 				   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3345 	    }
3346 	}
3347       else
3348 	{
3349 	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3350 	    {
3351 	      struct deps_reg *reg_last = &deps->reg_last[i];
3352 	      add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3353 					    REG_DEP_ANTI, true);
3354 	      add_dependence_list_and_free (deps, insn,
3355 					    &reg_last->control_uses, 0,
3356 					    REG_DEP_CONTROL, true);
3357 	      add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3358 					    reg_pending_barrier == TRUE_BARRIER
3359 					    ? REG_DEP_TRUE : REG_DEP_ANTI,
3360 					    true);
3361 	      add_dependence_list_and_free (deps, insn,
3362 					    &reg_last->implicit_sets, 0,
3363 					    REG_DEP_ANTI, true);
3364 	      add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3365 					    reg_pending_barrier == TRUE_BARRIER
3366 					    ? REG_DEP_TRUE : REG_DEP_ANTI,
3367 					    true);
3368 
3369               if (!deps->readonly)
3370                 {
3371                   reg_last->uses_length = 0;
3372                   reg_last->clobbers_length = 0;
3373                 }
3374 	    }
3375 	}
3376 
3377       if (!deps->readonly)
3378         for (i = 0; i < (unsigned)deps->max_reg; i++)
3379           {
3380             struct deps_reg *reg_last = &deps->reg_last[i];
3381             reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3382             SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3383           }
3384 
3385       /* Don't flush pending lists on speculative checks for
3386 	 selective scheduling.  */
3387       if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
3388 	flush_pending_lists (deps, insn, true, true);
3389 
3390       reg_pending_barrier = NOT_A_BARRIER;
3391     }
3392 
3393   /* If a post-call group is still open, see if it should remain so.
3394      This insn must be a simple move of a hard reg to a pseudo or
3395      vice-versa.
3396 
3397      We must avoid moving these insns for correctness on targets
3398      with small register classes, and for special registers like
3399      PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
3400      hard regs for all targets.  */
3401 
3402   if (deps->in_post_call_group_p)
3403     {
3404       rtx tmp, set = single_set (insn);
3405       int src_regno, dest_regno;
3406 
3407       if (set == NULL)
3408 	{
3409 	  if (DEBUG_INSN_P (insn))
3410 	    /* We don't want to mark debug insns as part of the same
3411 	       sched group.  We know they really aren't, but if we use
3412 	       debug insns to tell that a call group is over, we'll
3413 	       get different code if debug insns are not there and
3414 	       instructions that follow seem like they should be part
3415 	       of the call group.
3416 
3417 	       Also, if we did, chain_to_prev_insn would move the
3418 	       deps of the debug insn to the call insn, modifying
3419 	       non-debug post-dependency counts of the debug insn
3420 	       dependencies and otherwise messing with the scheduling
3421 	       order.
3422 
3423 	       Instead, let such debug insns be scheduled freely, but
3424 	       keep the call group open in case there are insns that
3425 	       should be part of it afterwards.  Since we grant debug
3426 	       insns higher priority than even sched group insns, it
3427 	       will all turn out all right.  */
3428 	    goto debug_dont_end_call_group;
3429 	  else
3430 	    goto end_call_group;
3431 	}
3432 
3433       tmp = SET_DEST (set);
3434       if (GET_CODE (tmp) == SUBREG)
3435 	tmp = SUBREG_REG (tmp);
3436       if (REG_P (tmp))
3437 	dest_regno = REGNO (tmp);
3438       else
3439 	goto end_call_group;
3440 
3441       tmp = SET_SRC (set);
3442       if (GET_CODE (tmp) == SUBREG)
3443 	tmp = SUBREG_REG (tmp);
3444       if ((GET_CODE (tmp) == PLUS
3445 	   || GET_CODE (tmp) == MINUS)
3446 	  && REG_P (XEXP (tmp, 0))
3447 	  && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3448 	  && dest_regno == STACK_POINTER_REGNUM)
3449 	src_regno = STACK_POINTER_REGNUM;
3450       else if (REG_P (tmp))
3451 	src_regno = REGNO (tmp);
3452       else
3453 	goto end_call_group;
3454 
3455       if (src_regno < FIRST_PSEUDO_REGISTER
3456 	  || dest_regno < FIRST_PSEUDO_REGISTER)
3457 	{
3458 	  if (!deps->readonly
3459               && deps->in_post_call_group_p == post_call_initial)
3460 	    deps->in_post_call_group_p = post_call;
3461 
3462           if (!sel_sched_p () || sched_emulate_haifa_p)
3463             {
3464               SCHED_GROUP_P (insn) = 1;
3465               CANT_MOVE (insn) = 1;
3466             }
3467 	}
3468       else
3469 	{
3470 	end_call_group:
3471           if (!deps->readonly)
3472             deps->in_post_call_group_p = not_post_call;
3473 	}
3474     }
3475 
3476  debug_dont_end_call_group:
3477   if ((current_sched_info->flags & DO_SPECULATION)
3478       && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3479     /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3480        be speculated.  */
3481     {
3482       if (sel_sched_p ())
3483         sel_mark_hard_insn (insn);
3484       else
3485         {
3486           sd_iterator_def sd_it;
3487           dep_t dep;
3488 
3489           for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3490                sd_iterator_cond (&sd_it, &dep);)
3491             change_spec_dep_to_hard (sd_it);
3492         }
3493     }
3494 
3495   /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
3496      honor their original ordering.  */
3497   if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
3498     {
3499       if (deps->last_args_size)
3500 	add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
3501       if (!deps->readonly)
3502 	deps->last_args_size = insn;
3503     }
3504 }
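
/* Illustration of the REG_ARGS_SIZE handling above (insns and note
   values hypothetical): given two argument pushes

       push1:  carries a REG_ARGS_SIZE note of 8
       push2:  carries a REG_ARGS_SIZE note of 16

   push2 is made output-dependent on push1, so the two can never be
   reordered and the recorded argument-area sizes remain consistent.  */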
3505 
3506 /* Return TRUE if INSN might not always return normally (e.g. a call
3507    to exit, a longjmp, an infinite loop, ...).  */
3508 /* FIXME: Why can't this function just use flags_from_decl_or_type and
3509    test for ECF_NORETURN?  */
3510 static bool
3511 call_may_noreturn_p (rtx_insn *insn)
3512 {
3513   rtx call;
3514 
3515   /* const or pure calls that aren't looping will always return.  */
3516   if (RTL_CONST_OR_PURE_CALL_P (insn)
3517       && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3518     return false;
3519 
3520   call = get_call_rtx_from (insn);
3521   if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3522     {
3523       rtx symbol = XEXP (XEXP (call, 0), 0);
3524       if (SYMBOL_REF_DECL (symbol)
3525 	  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3526 	{
3527 	  if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3528 	      == BUILT_IN_NORMAL)
3529 	    switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3530 	      {
3531 	      case BUILT_IN_BCMP:
3532 	      case BUILT_IN_BCOPY:
3533 	      case BUILT_IN_BZERO:
3534 	      case BUILT_IN_INDEX:
3535 	      case BUILT_IN_MEMCHR:
3536 	      case BUILT_IN_MEMCMP:
3537 	      case BUILT_IN_MEMCPY:
3538 	      case BUILT_IN_MEMMOVE:
3539 	      case BUILT_IN_MEMPCPY:
3540 	      case BUILT_IN_MEMSET:
3541 	      case BUILT_IN_RINDEX:
3542 	      case BUILT_IN_STPCPY:
3543 	      case BUILT_IN_STPNCPY:
3544 	      case BUILT_IN_STRCAT:
3545 	      case BUILT_IN_STRCHR:
3546 	      case BUILT_IN_STRCMP:
3547 	      case BUILT_IN_STRCPY:
3548 	      case BUILT_IN_STRCSPN:
3549 	      case BUILT_IN_STRLEN:
3550 	      case BUILT_IN_STRNCAT:
3551 	      case BUILT_IN_STRNCMP:
3552 	      case BUILT_IN_STRNCPY:
3553 	      case BUILT_IN_STRPBRK:
3554 	      case BUILT_IN_STRRCHR:
3555 	      case BUILT_IN_STRSPN:
3556 	      case BUILT_IN_STRSTR:
3557 		/* Assume certain string/memory builtins always return.  */
3558 		return false;
3559 	      default:
3560 		break;
3561 	      }
3562 	}
3563     }
3564 
3565   /* For all other calls assume that they might not always return.  */
3566   return true;
3567 }
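
/* Hedged illustration of the predicate above (results depend on what
   symbol and builtin information is visible at this point):

       memcpy (dst, src, n);   -> false (string/memory builtin above)
       exit (1);               -> true (not in the list above)
       foo ();                 -> true (arbitrary external call)

   A "true" answer makes deps_analyze_insn record the call in
   deps->last_function_call_may_noreturn, which limits how far trapping
   insns may be moved across it.  */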
3568 
3569 /* Return true if INSN should be made dependent on the previous instruction
3570    group, and if all INSN's dependencies should be moved to the first
3571    instruction of that group.  */
3572 
3573 static bool
3574 chain_to_prev_insn_p (rtx_insn *insn)
3575 {
3576   /* INSN forms a group with the previous instruction.  */
3577   if (SCHED_GROUP_P (insn))
3578     return true;
3579 
3580   /* If the previous instruction clobbers a register R and this one sets
3581      part of R, the clobber was added specifically to help us track the
3582      liveness of R.  There's no point scheduling the clobber and leaving
3583      INSN behind, especially if we move the clobber to another block.  */
3584   rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
3585   if (prev
3586       && INSN_P (prev)
3587       && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
3588       && GET_CODE (PATTERN (prev)) == CLOBBER)
3589     {
3590       rtx x = XEXP (PATTERN (prev), 0);
3591       if (set_of (x, insn))
3592 	return true;
3593     }
3594 
3595   return false;
3596 }
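
/* Illustrative RTL for the clobber case above (register numbers
   hypothetical):

       prev:  (clobber (reg:DI 100))
       insn:  (set (subreg:SI (reg:DI 100) 0) (reg:SI 101))

   The clobber exists only to mark all of r100 as defined before the
   partial SImode set, so scheduling the two apart - or moving the
   clobber into another block - would defeat its purpose; hence INSN
   is chained to PREV.  */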
3597 
3598 /* Analyze INSN with DEPS as a context.  */
3599 void
3600 deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
3601 {
3602   if (sched_deps_info->start_insn)
3603     sched_deps_info->start_insn (insn);
3604 
3605   /* Record the condition for this insn.  */
3606   if (NONDEBUG_INSN_P (insn))
3607     {
3608       rtx t;
3609       sched_get_condition_with_rev (insn, NULL);
3610       t = INSN_CACHED_COND (insn);
3611       INSN_COND_DEPS (insn) = NULL;
3612       if (reload_completed
3613 	  && (current_sched_info->flags & DO_PREDICATION)
3614 	  && COMPARISON_P (t)
3615 	  && REG_P (XEXP (t, 0))
3616 	  && CONSTANT_P (XEXP (t, 1)))
3617 	{
3618 	  unsigned int regno;
3619 	  int nregs;
3620 	  rtx_insn_list *cond_deps = NULL;
3621 	  t = XEXP (t, 0);
3622 	  regno = REGNO (t);
3623 	  nregs = REG_NREGS (t);
3624 	  while (nregs-- > 0)
3625 	    {
3626 	      struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
3627 	      cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
3628 	      cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
3629 	      cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
3630 	    }
3631 	  INSN_COND_DEPS (insn) = cond_deps;
3632 	}
3633     }
3634 
3635   if (JUMP_P (insn))
3636     {
3637       /* Make each JUMP_INSN (but not a speculative check)
3638          a scheduling barrier for memory references.  */
3639       if (!deps->readonly
3640           && !(sel_sched_p ()
3641                && sel_insn_is_speculation_check (insn)))
3642         {
3643           /* Keep the list a reasonable size.  */
3644           if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
3645             flush_pending_lists (deps, insn, true, true);
3646           else
3647 	    deps->pending_jump_insns
3648               = alloc_INSN_LIST (insn, deps->pending_jump_insns);
3649         }
3650 
3651       /* For each insn which shouldn't cross a jump, add a dependence.  */
3652       add_dependence_list_and_free (deps, insn,
3653 				    &deps->sched_before_next_jump, 1,
3654 				    REG_DEP_ANTI, true);
3655 
3656       sched_analyze_insn (deps, PATTERN (insn), insn);
3657     }
3658   else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
3659     {
3660       sched_analyze_insn (deps, PATTERN (insn), insn);
3661     }
3662   else if (CALL_P (insn))
3663     {
3664       int i;
3665 
3666       CANT_MOVE (insn) = 1;
3667 
3668       if (find_reg_note (insn, REG_SETJMP, NULL))
3669         {
3670           /* This is setjmp.  Assume that all registers, not just
3671              hard registers, may be clobbered by this call.  */
3672           reg_pending_barrier = MOVE_BARRIER;
3673         }
3674       else
3675         {
3676           for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3677             /* A call may read and modify global register variables.  */
3678             if (global_regs[i])
3679               {
3680                 SET_REGNO_REG_SET (reg_pending_sets, i);
3681                 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3682               }
3683           /* Other call-clobbered hard regs may be clobbered.
3684              Since we only have a choice between 'might be clobbered'
3685              and 'definitely not clobbered', we must include all
3686              partly call-clobbered registers here.  */
3687             else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3688                      || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3689               SET_REGNO_REG_SET (reg_pending_clobbers, i);
3690           /* We don't know exactly which fixed registers the function
3691              uses, but the stack pointer is certainly among them, so be
3692              conservative and treat them all as implicitly used.  */
3693             else if (fixed_regs[i])
3694 	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3695           /* The frame pointer is normally not used by the function
3696              itself, but by the debugger.  */
3697           /* ??? MIPS o32 is an exception.  It uses the frame pointer
3698              in the macro expansion of jal but does not represent this
3699              fact in the call_insn rtl.  */
3700             else if (i == FRAME_POINTER_REGNUM
3701                      || (i == HARD_FRAME_POINTER_REGNUM
3702                          && (! reload_completed || frame_pointer_needed)))
3703 	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3704         }
3705 
3706       /* For each insn which shouldn't cross a call, add a dependence
3707          between that insn and this call insn.  */
3708       add_dependence_list_and_free (deps, insn,
3709                                     &deps->sched_before_next_call, 1,
3710                                     REG_DEP_ANTI, true);
3711 
3712       sched_analyze_insn (deps, PATTERN (insn), insn);
3713 
3714       /* If CALL were in a sched group, this would violate the
3715 	 convention that sched group insns have dependencies only on the
3716 	 previous instruction.
3717 
3718 	 Of course one can say: "Hey!  What about the head of the sched group?"
3719 	 And I will answer: "Basic principles (one dep per insn) are always
3720 	 the same."  */
3721       gcc_assert (!SCHED_GROUP_P (insn));
3722 
3723       /* In the absence of interprocedural alias analysis, we must flush
3724          all pending reads and writes, and start new dependencies starting
3725          from here.  But only flush writes for constant calls (which may
3726          be passed a pointer to something we haven't written yet).  */
3727       flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3728 
3729       if (!deps->readonly)
3730         {
3731           /* Remember the last function call for limiting lifetimes.  */
3732           free_INSN_LIST_list (&deps->last_function_call);
3733           deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3734 
3735 	  if (call_may_noreturn_p (insn))
3736 	    {
3737 	      /* Remember the last function call that might not always return
3738 		 normally for limiting moves of trapping insns.  */
3739 	      free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3740 	      deps->last_function_call_may_noreturn
3741 		= alloc_INSN_LIST (insn, NULL_RTX);
3742 	    }
3743 
3744           /* Before reload, begin a post-call group, so as to keep the
3745              lifetimes of hard registers correct.  */
3746           if (! reload_completed)
3747             deps->in_post_call_group_p = post_call;
3748         }
3749     }
3750 
3751   if (sched_deps_info->use_cselib)
3752     cselib_process_insn (insn);
3753 
3754   if (sched_deps_info->finish_insn)
3755     sched_deps_info->finish_insn ();
3756 
3757   /* Fixup the dependencies in the sched group.  */
3758   if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3759       && chain_to_prev_insn_p (insn)
3760       && !sel_sched_p ())
3761     chain_to_prev_insn (insn);
3762 }
3763 
3764 /* Initialize DEPS for the new block beginning with HEAD.  */
3765 void
3766 deps_start_bb (struct deps_desc *deps, rtx_insn *head)
3767 {
3768   gcc_assert (!deps->readonly);
3769 
3770   /* Before reload, if the previous block ended in a call, show that
3771      we are inside a post-call group, so as to keep the lifetimes of
3772      hard registers correct.  */
3773   if (! reload_completed && !LABEL_P (head))
3774     {
3775       rtx_insn *insn = prev_nonnote_nondebug_insn (head);
3776 
3777       if (insn && CALL_P (insn))
3778 	deps->in_post_call_group_p = post_call_initial;
3779     }
3780 }
3781 
3782 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3783    dependencies for each insn.  */
3784 void
3785 sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
3786 {
3787   rtx_insn *insn;
3788 
3789   if (sched_deps_info->use_cselib)
3790     cselib_init (CSELIB_RECORD_MEMORY);
3791 
3792   deps_start_bb (deps, head);
3793 
3794   for (insn = head;; insn = NEXT_INSN (insn))
3795     {
3796 
3797       if (INSN_P (insn))
3798 	{
3799 	  /* And initialize deps_lists.  */
3800 	  sd_init_insn (insn);
3801 	  /* Clean up SCHED_GROUP_P which may have been set by the
3802 	     previous scheduler pass.  */
3803 	  if (SCHED_GROUP_P (insn))
3804 	    SCHED_GROUP_P (insn) = 0;
3805 	}
3806 
3807       deps_analyze_insn (deps, insn);
3808 
3809       if (insn == tail)
3810 	{
3811 	  if (sched_deps_info->use_cselib)
3812 	    cselib_finish ();
3813 	  return;
3814 	}
3815     }
3816   gcc_unreachable ();
3817 }
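
/* A minimal sketch of the expected calling sequence for the analysis
   entry points above; the real drivers (e.g. sched-rgn.c) add region
   bookkeeping around this.  Illustrative only, hence not compiled:  */
#if 0
static void
analyze_block_sketch (rtx_insn *head, rtx_insn *tail)
{
  struct deps_desc tmp_deps;

  /* Fresh context with reg_last allocated eagerly.  */
  init_deps (&tmp_deps, false);

  /* Build backward dependencies for every insn in [HEAD, TAIL].  */
  sched_analyze (&tmp_deps, head, tail);

  /* Release the insn lists owned by the context.  */
  free_deps (&tmp_deps);
}
#endif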
3818 
3819 /* Helper for sched_free_deps ().
3820    Delete INSN's (RESOLVED_P) backward dependencies.  */
3821 static void
3822 delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
3823 {
3824   sd_iterator_def sd_it;
3825   dep_t dep;
3826   sd_list_types_def types;
3827 
3828   if (resolved_p)
3829     types = SD_LIST_RES_BACK;
3830   else
3831     types = SD_LIST_BACK;
3832 
3833   for (sd_it = sd_iterator_start (insn, types);
3834        sd_iterator_cond (&sd_it, &dep);)
3835     {
3836       dep_link_t link = *sd_it.linkp;
3837       dep_node_t node = DEP_LINK_NODE (link);
3838       deps_list_t back_list;
3839       deps_list_t forw_list;
3840 
3841       get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3842       remove_from_deps_list (link, back_list);
3843       delete_dep_node (node);
3844     }
3845 }
3846 
3847 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3848    deps_lists.  */
3849 void
3850 sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
3851 {
3852   rtx_insn *insn;
3853   rtx_insn *next_tail = NEXT_INSN (tail);
3854 
3855   /* We make two passes since some insns may be scheduled before their
3856      dependencies are resolved.  */
3857   for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3858     if (INSN_P (insn) && INSN_LUID (insn) > 0)
3859       {
3860 	/* Clear forward deps and leave the dep_nodes to the
3861 	   corresponding back_deps list.  */
3862 	if (resolved_p)
3863 	  clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3864 	else
3865 	  clear_deps_list (INSN_FORW_DEPS (insn));
3866       }
3867   for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3868     if (INSN_P (insn) && INSN_LUID (insn) > 0)
3869       {
3870 	/* Clear resolved back deps together with their dep_nodes.  */
3871 	delete_dep_nodes_in_back_deps (insn, resolved_p);
3872 
3873 	sd_finish_insn (insn);
3874       }
3875 }
3876 
3877 /* Initialize variables for region data dependence analysis.
3878    When LAZY_REG_LAST is true, do not allocate reg_last array
3879    of struct deps_desc immediately.  */
3880 
3881 void
3882 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3883 {
3884   int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3885 
3886   deps->max_reg = max_reg;
3887   if (lazy_reg_last)
3888     deps->reg_last = NULL;
3889   else
3890     deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3891   INIT_REG_SET (&deps->reg_last_in_use);
3892 
3893   deps->pending_read_insns = 0;
3894   deps->pending_read_mems = 0;
3895   deps->pending_write_insns = 0;
3896   deps->pending_write_mems = 0;
3897   deps->pending_jump_insns = 0;
3898   deps->pending_read_list_length = 0;
3899   deps->pending_write_list_length = 0;
3900   deps->pending_flush_length = 0;
3901   deps->last_pending_memory_flush = 0;
3902   deps->last_function_call = 0;
3903   deps->last_function_call_may_noreturn = 0;
3904   deps->sched_before_next_call = 0;
3905   deps->sched_before_next_jump = 0;
3906   deps->in_post_call_group_p = not_post_call;
3907   deps->last_debug_insn = 0;
3908   deps->last_args_size = 0;
3909   deps->last_reg_pending_barrier = NOT_A_BARRIER;
3910   deps->readonly = 0;
3911 }
3912 
3913 /* Initialize only the reg_last field of DEPS, which was not allocated
3914    before because DEPS was initialized lazily.  */
3915 void
3916 init_deps_reg_last (struct deps_desc *deps)
3917 {
3918   gcc_assert (deps && deps->max_reg > 0);
3919   gcc_assert (deps->reg_last == NULL);
3920 
3921   deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3922 }
3923 
3924 
3925 /* Free insn lists found in DEPS.  */
3926 
3927 void
3928 free_deps (struct deps_desc *deps)
3929 {
3930   unsigned i;
3931   reg_set_iterator rsi;
3932 
3933   /* max_reg is set to 0 once this context has been freed.  */
3934   if (deps->max_reg == 0)
3935     {
3936       gcc_assert (deps->reg_last == NULL);
3937       return;
3938     }
3939   deps->max_reg = 0;
3940 
3941   free_INSN_LIST_list (&deps->pending_read_insns);
3942   free_EXPR_LIST_list (&deps->pending_read_mems);
3943   free_INSN_LIST_list (&deps->pending_write_insns);
3944   free_EXPR_LIST_list (&deps->pending_write_mems);
3945   free_INSN_LIST_list (&deps->last_pending_memory_flush);
3946 
3947   /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3948      times.  For a testcase with 42000 regs and 8000 small basic blocks,
3949      this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
3950   EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3951     {
3952       struct deps_reg *reg_last = &deps->reg_last[i];
3953       if (reg_last->uses)
3954 	free_INSN_LIST_list (&reg_last->uses);
3955       if (reg_last->sets)
3956 	free_INSN_LIST_list (&reg_last->sets);
3957       if (reg_last->implicit_sets)
3958 	free_INSN_LIST_list (&reg_last->implicit_sets);
3959       if (reg_last->control_uses)
3960 	free_INSN_LIST_list (&reg_last->control_uses);
3961       if (reg_last->clobbers)
3962 	free_INSN_LIST_list (&reg_last->clobbers);
3963     }
3964   CLEAR_REG_SET (&deps->reg_last_in_use);
3965 
3966   /* As we initialize reg_last lazily, it is possible that we didn't allocate
3967      it at all.  */
3968   free (deps->reg_last);
3969   deps->reg_last = NULL;
3970 
3971   deps = NULL;
3972 }
3973 
3974 /* Remove INSN from the dependence context DEPS.  */
3975 void
3976 remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
3977 {
3978   int removed;
3979   unsigned i;
3980   reg_set_iterator rsi;
3981 
3982   removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3983                                                &deps->pending_read_mems);
3984   if (!DEBUG_INSN_P (insn))
3985     deps->pending_read_list_length -= removed;
3986   removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3987                                                &deps->pending_write_mems);
3988   deps->pending_write_list_length -= removed;
3989 
3990   removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
3991   deps->pending_flush_length -= removed;
3992   removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3993   deps->pending_flush_length -= removed;
3994 
3995   EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3996     {
3997       struct deps_reg *reg_last = &deps->reg_last[i];
3998       if (reg_last->uses)
3999 	remove_from_dependence_list (insn, &reg_last->uses);
4000       if (reg_last->sets)
4001 	remove_from_dependence_list (insn, &reg_last->sets);
4002       if (reg_last->implicit_sets)
4003 	remove_from_dependence_list (insn, &reg_last->implicit_sets);
4004       if (reg_last->clobbers)
4005 	remove_from_dependence_list (insn, &reg_last->clobbers);
4006       if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
4007 	  && !reg_last->clobbers)
4008         CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
4009     }
4010 
4011   if (CALL_P (insn))
4012     {
4013       remove_from_dependence_list (insn, &deps->last_function_call);
4014       remove_from_dependence_list (insn,
4015 				   &deps->last_function_call_may_noreturn);
4016     }
4017   remove_from_dependence_list (insn, &deps->sched_before_next_call);
4018 }
4019 
4020 /* Init deps data vector.  */
4021 static void
4022 init_deps_data_vector (void)
4023 {
4024   int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
4025   if (reserve > 0 && ! h_d_i_d.space (reserve))
4026     h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
4027 }
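
/* Worked example of the growth policy above (numbers hypothetical):
   with sched_max_luid == 1000 and h_d_i_d.length () == 400, reserve is
   601; if the vector lacks that much headroom it is grown (cleared) to
   3 * 1000 / 2 == 1500 elements, leaving room for later luids without
   reallocating on every extension.  */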
4028 
4029 /* If it is profitable to use them, initialize or extend (depending on
4030    GLOBAL_P) dependency data.  */
4031 void
4032 sched_deps_init (bool global_p)
4033 {
4034   /* Average number of insns in the basic block.
4035      '+ 1' is used to make it nonzero.  */
4036   int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;
4037 
4038   init_deps_data_vector ();
4039 
4040   /* We use another caching mechanism for selective scheduling, so
4041      we don't use this one.  */
4042   if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
4043     {
4044       /* ?!? We could save some memory by computing a per-region luid mapping
4045          which could reduce both the number of vectors in the cache and the
4046          size of each vector.  Instead we just avoid the cache entirely unless
4047          the average number of instructions in a basic block is very high.  See
4048          the comment before the declaration of true_dependency_cache for
4049          what we consider "very high".  */
4050       cache_size = 0;
4051       extend_dependency_caches (sched_max_luid, true);
4052     }
4053 
4054   if (global_p)
4055     {
4056       dl_pool = new object_allocator<_deps_list> ("deps_list");
4057 				/* Allocate lists for one block at a time.  */
4058       dn_pool = new object_allocator<_dep_node> ("dep_node");
4059 				/* Allocate nodes for one block at a time.  */
4060     }
4061 }
4062 
4063 
4064 /* Create or extend (depending on CREATE_P) dependency caches to
4065    size N.  */
4066 void
4067 extend_dependency_caches (int n, bool create_p)
4068 {
4069   if (create_p || true_dependency_cache)
4070     {
4071       int i, luid = cache_size + n;
4072 
4073       true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
4074 					  luid);
4075       output_dependency_cache = XRESIZEVEC (bitmap_head,
4076 					    output_dependency_cache, luid);
4077       anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
4078 					  luid);
4079       control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
4080 					  luid);
4081 
4082       if (current_sched_info->flags & DO_SPECULATION)
4083         spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
4084 					    luid);
4085 
4086       for (i = cache_size; i < luid; i++)
4087 	{
4088 	  bitmap_initialize (&true_dependency_cache[i], 0);
4089 	  bitmap_initialize (&output_dependency_cache[i], 0);
4090 	  bitmap_initialize (&anti_dependency_cache[i], 0);
4091 	  bitmap_initialize (&control_dependency_cache[i], 0);
4092 
4093           if (current_sched_info->flags & DO_SPECULATION)
4094             bitmap_initialize (&spec_dependency_cache[i], 0);
4095 	}
4096       cache_size = luid;
4097     }
4098 }
4099 
4100 /* Finalize dependency information for the whole function.  */
4101 void
4102 sched_deps_finish (void)
4103 {
4104   gcc_assert (deps_pools_are_empty_p ());
4105   delete dn_pool;
4106   delete dl_pool;
4107   dn_pool = NULL;
4108   dl_pool = NULL;
4109 
4110   h_d_i_d.release ();
4112 
4113   if (true_dependency_cache)
4114     {
4115       int i;
4116 
4117       for (i = 0; i < cache_size; i++)
4118 	{
4119 	  bitmap_clear (&true_dependency_cache[i]);
4120 	  bitmap_clear (&output_dependency_cache[i]);
4121 	  bitmap_clear (&anti_dependency_cache[i]);
4122 	  bitmap_clear (&control_dependency_cache[i]);
4123 
4124           if (sched_deps_info->generate_spec_deps)
4125             bitmap_clear (&spec_dependency_cache[i]);
4126 	}
4127       free (true_dependency_cache);
4128       true_dependency_cache = NULL;
4129       free (output_dependency_cache);
4130       output_dependency_cache = NULL;
4131       free (anti_dependency_cache);
4132       anti_dependency_cache = NULL;
4133       free (control_dependency_cache);
4134       control_dependency_cache = NULL;
4135 
4136       if (sched_deps_info->generate_spec_deps)
4137         {
4138           free (spec_dependency_cache);
4139           spec_dependency_cache = NULL;
4140         }
4141 
4142     }
  cache_size = 0;
4143 }
4144 
4145 /* Initialize some global variables needed by the dependency analysis
4146    code.  */
4147 
4148 void
4149 init_deps_global (void)
4150 {
4151   CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
4152   CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
4153   reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
4154   reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
4155   reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
4156   reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
4157   reg_pending_barrier = NOT_A_BARRIER;
4158 
4159   if (!sel_sched_p () || sched_emulate_haifa_p)
4160     {
4161       sched_deps_info->start_insn = haifa_start_insn;
4162       sched_deps_info->finish_insn = haifa_finish_insn;
4163 
4164       sched_deps_info->note_reg_set = haifa_note_reg_set;
4165       sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
4166       sched_deps_info->note_reg_use = haifa_note_reg_use;
4167 
4168       sched_deps_info->note_mem_dep = haifa_note_mem_dep;
4169       sched_deps_info->note_dep = haifa_note_dep;
4170    }
4171 }
4172 
4173 /* Free everything used by the dependency analysis code.  */
4174 
4175 void
4176 finish_deps_global (void)
4177 {
4178   FREE_REG_SET (reg_pending_sets);
4179   FREE_REG_SET (reg_pending_clobbers);
4180   FREE_REG_SET (reg_pending_uses);
4181   FREE_REG_SET (reg_pending_control_uses);
4182 }
4183 
4184 /* Estimate the weakness of dependence between MEM1 and MEM2.  */
4185 dw_t
4186 estimate_dep_weak (rtx mem1, rtx mem2)
4187 {
4188   rtx r1, r2;
4189 
4190   if (mem1 == mem2)
4191     /* MEMs are the same - don't speculate.  */
4192     return MIN_DEP_WEAK;
4193 
4194   r1 = XEXP (mem1, 0);
4195   r2 = XEXP (mem2, 0);
4196 
4197   if (r1 == r2
4198       || (REG_P (r1) && REG_P (r2)
4199 	  && REGNO (r1) == REGNO (r2)))
4200     /* Again, MEMs are the same.  */
4201     return MIN_DEP_WEAK;
4202   else if ((REG_P (r1) && !REG_P (r2))
4203 	   || (!REG_P (r1) && REG_P (r2)))
4204     /* Different addressing modes - a reason to be more speculative
4205        than usual.  */
4206     return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4207   else
4208     /* We can't say anything about the dependence.  */
4209     return UNCERTAIN_DEP_WEAK;
4210 }
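
/* Worked example, assuming the weakness constants from sched-int.h
   (MIN_DEP_WEAK == 1, MAX_DEP_WEAK == 255, NO_DEP_WEAK == 256,
   UNCERTAIN_DEP_WEAK == 192 - check the header before relying on the
   exact numbers):

       (mem (reg 100)) vs (mem (reg 100))
	 -> MIN_DEP_WEAK: same address, never speculate.
       (mem (reg 100)) vs (mem (plus (reg 101) (const_int 4)))
	 -> 256 - (256 - 192) / 2 == 224: a bare register against a
	    composite address, so speculate more readily than usual.
       (mem (reg 100)) vs (mem (reg 101))
	 -> UNCERTAIN_DEP_WEAK: nothing useful can be said.  */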
4211 
4212 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4213    This function can handle same INSN and ELEM (INSN == ELEM).
4214    It is a convenience wrapper.  */
4215 static void
4216 add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
4217 {
4218   ds_t ds;
4219   bool internal;
4220 
4221   if (dep_type == REG_DEP_TRUE)
4222     ds = DEP_TRUE;
4223   else if (dep_type == REG_DEP_OUTPUT)
4224     ds = DEP_OUTPUT;
4225   else if (dep_type == REG_DEP_CONTROL)
4226     ds = DEP_CONTROL;
4227   else
4228     {
4229       gcc_assert (dep_type == REG_DEP_ANTI);
4230       ds = DEP_ANTI;
4231     }
4232 
4233   /* When add_dependence is called from inside sched-deps.c, we expect
4234      cur_insn to be non-null.  */
4235   internal = cur_insn != NULL;
4236   if (internal)
4237     gcc_assert (insn == cur_insn);
4238   else
4239     cur_insn = insn;
4240 
4241   note_dep (elem, ds);
4242   if (!internal)
4243     cur_insn = NULL;
4244 }
4245 
4246 /* Return the weakness of speculative type TYPE in the dep_status DS,
4247    skipping the range check of get_dep_weak so malformed input won't ICE.  */
4248 static dw_t
4249 get_dep_weak_1 (ds_t ds, ds_t type)
4250 {
4251   ds = ds & type;
4252 
4253   switch (type)
4254     {
4255     case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4256     case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4257     case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4258     case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4259     default: gcc_unreachable ();
4260     }
4261 
4262   return (dw_t) ds;
4263 }
4264 
4265 /* Return weakness of speculative type TYPE in the dep_status DS.  */
4266 dw_t
4267 get_dep_weak (ds_t ds, ds_t type)
4268 {
4269   dw_t dw = get_dep_weak_1 (ds, type);
4270 
4271   gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4272   return dw;
4273 }
4274 
4275 /* Return the dep_status, which has the same parameters as DS, except for
4276    speculative type TYPE, that will have weakness DW.  */
4277 ds_t
4278 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4279 {
4280   gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4281 
4282   ds &= ~type;
4283   switch (type)
4284     {
4285     case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4286     case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4287     case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4288     case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4289     default: gcc_unreachable ();
4290     }
4291   return ds;
4292 }
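
/* Example of the bit packing behind get_dep_weak/set_dep_weak: each
   speculative type owns a BITS_PER_DEP_WEAK-wide field of ds_t at its
   *_BITS_OFFSET (see sched-int.h), so with an illustrative weakness

       ds = set_dep_weak (0, BEGIN_DATA, 200);
       gcc_assert (get_dep_weak (ds, BEGIN_DATA) == 200);

   the value is shifted up into the BEGIN_DATA field on the way in and
   shifted back down on extraction.  */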
4293 
4294 /* Return the join of two dep_statuses DS1 and DS2.
4295    If MAX_P is true then choose the greater probability,
4296    otherwise multiply probabilities.
4297    This function assumes that both DS1 and DS2 contain speculative bits.  */
4298 static ds_t
4299 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
4300 {
4301   ds_t ds, t;
4302 
4303   gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
4304 
4305   ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
4306 
4307   t = FIRST_SPEC_TYPE;
4308   do
4309     {
4310       if ((ds1 & t) && !(ds2 & t))
4311 	ds |= ds1 & t;
4312       else if (!(ds1 & t) && (ds2 & t))
4313 	ds |= ds2 & t;
4314       else if ((ds1 & t) && (ds2 & t))
4315 	{
4316 	  dw_t dw1 = get_dep_weak (ds1, t);
4317 	  dw_t dw2 = get_dep_weak (ds2, t);
4318 	  ds_t dw;
4319 
4320 	  if (!max_p)
4321 	    {
4322 	      dw = ((ds_t) dw1) * ((ds_t) dw2);
4323 	      dw /= MAX_DEP_WEAK;
4324 	      if (dw < MIN_DEP_WEAK)
4325 		dw = MIN_DEP_WEAK;
4326 	    }
4327 	  else
4328 	    {
4329 	      if (dw1 >= dw2)
4330 		dw = dw1;
4331 	      else
4332 		dw = dw2;
4333 	    }
4334 
4335 	  ds = set_dep_weak (ds, t, (dw_t) dw);
4336 	}
4337 
4338       if (t == LAST_SPEC_TYPE)
4339 	break;
4340       t <<= SPEC_TYPE_SHIFT;
4341     }
4342   while (1);
4343 
4344   return ds;
4345 }
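
/* Worked example for the merge above, with MAX_DEP_WEAK assumed to be
   255: weaknesses act as fixed-point probabilities scaled by
   MAX_DEP_WEAK, so if both inputs carry BEGIN_DATA with dw1 == dw2 ==
   128 (about 0.5), then with !MAX_P the joint weakness is
   128 * 128 / 255 == 64 (about 0.25, clamped below at MIN_DEP_WEAK),
   while with MAX_P it is simply max (128, 128) == 128.  */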
4346 
4347 /* Return the join of two dep_statuses DS1 and DS2.
4348    This function assumes that both DS1 and DS2 contain speculative bits.  */
4349 ds_t
4350 ds_merge (ds_t ds1, ds_t ds2)
4351 {
4352   return ds_merge_1 (ds1, ds2, false);
4353 }
4354 
4355 /* Return the join of two dep_statuses DS1 and DS2.  */
4356 ds_t
4357 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4358 {
4359   ds_t new_status = ds | ds2;
4360 
4361   if (new_status & SPECULATIVE)
4362     {
4363       if ((ds && !(ds & SPECULATIVE))
4364 	  || (ds2 && !(ds2 & SPECULATIVE)))
4365 	/* Then this dep can't be speculative.  */
4366 	new_status &= ~SPECULATIVE;
4367       else
4368 	{
4369 	  /* Both are speculative.  Merge the probabilities.  */
4370 	  if (mem1)
4371 	    {
4372 	      dw_t dw;
4373 
4374 	      dw = estimate_dep_weak (mem1, mem2);
4375 	      ds = set_dep_weak (ds, BEGIN_DATA, dw);
4376 	    }
4377 
4378 	  if (!ds)
4379 	    new_status = ds2;
4380 	  else if (!ds2)
4381 	    new_status = ds;
4382 	  else
4383 	    new_status = ds_merge (ds2, ds);
4384 	}
4385     }
4386 
4387   return new_status;
4388 }
4389 
4390 /* Return the join of DS1 and DS2.  Use maximum instead of multiplying
4391    probabilities.  */
4392 ds_t
4393 ds_max_merge (ds_t ds1, ds_t ds2)
4394 {
4395   if (ds1 == 0 && ds2 == 0)
4396     return 0;
4397 
4398   if (ds1 == 0 && ds2 != 0)
4399     return ds2;
4400 
4401   if (ds1 != 0 && ds2 == 0)
4402     return ds1;
4403 
4404   return ds_merge_1 (ds1, ds2, true);
4405 }
4406 
4407 /* Return the probability of speculation success for the speculation
4408    status DS.  */
4409 dw_t
4410 ds_weak (ds_t ds)
4411 {
4412   ds_t res = 1, dt;
4413   int n = 0;
4414 
4415   dt = FIRST_SPEC_TYPE;
4416   do
4417     {
4418       if (ds & dt)
4419 	{
4420 	  res *= (ds_t) get_dep_weak (ds, dt);
4421 	  n++;
4422 	}
4423 
4424       if (dt == LAST_SPEC_TYPE)
4425 	break;
4426       dt <<= SPEC_TYPE_SHIFT;
4427     }
4428   while (1);
4429 
4430   gcc_assert (n);
4431   while (--n)
4432     res /= MAX_DEP_WEAK;
4433 
4434   if (res < MIN_DEP_WEAK)
4435     res = MIN_DEP_WEAK;
4436 
4437   gcc_assert (res <= MAX_DEP_WEAK);
4438 
4439   return (dw_t) res;
4440 }
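
/* Worked example for ds_weak, again assuming MAX_DEP_WEAK == 255: a
   status carrying BEGIN_DATA weakness 200 and BEGIN_CONTROL weakness
   100 gives res == 200 * 100 == 20000 with n == 2; the single division
   by MAX_DEP_WEAK then yields 20000 / 255 == 78, i.e. the individual
   success probabilities multiply.  */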
4441 
4442 /* Return a dep status that contains all speculation types of DS.  */
4443 ds_t
4444 ds_get_speculation_types (ds_t ds)
4445 {
4446   if (ds & BEGIN_DATA)
4447     ds |= BEGIN_DATA;
4448   if (ds & BE_IN_DATA)
4449     ds |= BE_IN_DATA;
4450   if (ds & BEGIN_CONTROL)
4451     ds |= BEGIN_CONTROL;
4452   if (ds & BE_IN_CONTROL)
4453     ds |= BE_IN_CONTROL;
4454 
4455   return ds & SPECULATIVE;
4456 }
4457 
4458 /* Return a dep status that contains maximal weakness for each speculation
4459    type present in DS.  */
4460 ds_t
4461 ds_get_max_dep_weak (ds_t ds)
4462 {
4463   if (ds & BEGIN_DATA)
4464     ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4465   if (ds & BE_IN_DATA)
4466     ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4467   if (ds & BEGIN_CONTROL)
4468     ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4469   if (ds & BE_IN_CONTROL)
4470     ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4471 
4472   return ds;
4473 }
4474 
4475 /* Dump information about the dependence status S.  */
4476 static void
4477 dump_ds (FILE *f, ds_t s)
4478 {
4479   fprintf (f, "{");
4480 
4481   if (s & BEGIN_DATA)
4482     fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4483   if (s & BE_IN_DATA)
4484     fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4485   if (s & BEGIN_CONTROL)
4486     fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4487   if (s & BE_IN_CONTROL)
4488     fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4489 
4490   if (s & HARD_DEP)
4491     fprintf (f, "HARD_DEP; ");
4492 
4493   if (s & DEP_TRUE)
4494     fprintf (f, "DEP_TRUE; ");
4495   if (s & DEP_OUTPUT)
4496     fprintf (f, "DEP_OUTPUT; ");
4497   if (s & DEP_ANTI)
4498     fprintf (f, "DEP_ANTI; ");
4499   if (s & DEP_CONTROL)
4500     fprintf (f, "DEP_CONTROL; ");
4501 
4502   fprintf (f, "}");
4503 }
4504 
4505 DEBUG_FUNCTION void
4506 debug_ds (ds_t s)
4507 {
4508   dump_ds (stderr, s);
4509   fprintf (stderr, "\n");
4510 }
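
/* For reference, the output produced above for a data-speculative true
   dependence of weakness 128 would be:

       {BEGIN_DATA: 128; DEP_TRUE; }  */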
4511 
4512 /* Verify that dependence type and status are consistent.
4513    If RELAXED_P is true, then skip dep_weakness checks.  */
4514 static void
4515 check_dep (dep_t dep, bool relaxed_p)
4516 {
4517   enum reg_note dt = DEP_TYPE (dep);
4518   ds_t ds = DEP_STATUS (dep);
4519 
4520   gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4521 
4522   if (!(current_sched_info->flags & USE_DEPS_LIST))
4523     {
4524       gcc_assert (ds == 0);
4525       return;
4526     }
4527 
4528   /* Check that dependence type contains the same bits as the status.  */
4529   if (dt == REG_DEP_TRUE)
4530     gcc_assert (ds & DEP_TRUE);
4531   else if (dt == REG_DEP_OUTPUT)
4532     gcc_assert ((ds & DEP_OUTPUT)
4533 		&& !(ds & DEP_TRUE));
4534   else if (dt == REG_DEP_ANTI)
4535     gcc_assert ((ds & DEP_ANTI)
4536 		&& !(ds & (DEP_OUTPUT | DEP_TRUE)));
4537   else
4538     gcc_assert (dt == REG_DEP_CONTROL
4539 		&& (ds & DEP_CONTROL)
4540 		&& !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
4541 
4542   /* HARD_DEP cannot appear in the dep_status of a link.  */
4543   gcc_assert (!(ds & HARD_DEP));
4544 
4545   /* Check that dependence status is set correctly when speculation is not
4546      supported.  */
4547   if (!sched_deps_info->generate_spec_deps)
4548     gcc_assert (!(ds & SPECULATIVE));
4549   else if (ds & SPECULATIVE)
4550     {
4551       if (!relaxed_p)
4552 	{
4553 	  ds_t type = FIRST_SPEC_TYPE;
4554 
4555 	  /* Check that dependence weakness is in proper range.  */
4556 	  do
4557 	    {
4558 	      if (ds & type)
4559 		get_dep_weak (ds, type);
4560 
4561 	      if (type == LAST_SPEC_TYPE)
4562 		break;
4563 	      type <<= SPEC_TYPE_SHIFT;
4564 	    }
4565 	  while (1);
4566 	}
4567 
4568       if (ds & BEGIN_SPEC)
4569 	{
4570 	  /* Only true dependence can be data speculative.  */
4571 	  if (ds & BEGIN_DATA)
4572 	    gcc_assert (ds & DEP_TRUE);
4573 
4574 	  /* Control dependencies in the insn scheduler are represented by
4575 	     anti-dependencies, therefore only anti dependence can be
4576 	     control speculative.  */
4577 	  if (ds & BEGIN_CONTROL)
4578 	    gcc_assert (ds & DEP_ANTI);
4579 	}
4580       else
4581 	{
4582 	  /* Subsequent speculations should resolve true dependencies.  */
4583 	  gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4584 	}
4585 
4586       /* Check that true and anti dependencies can't have other speculative
4587 	 statuses.  */
4588       if (ds & DEP_TRUE)
4589 	gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4590       /* An output dependence can't be speculative at all.  */
4591       gcc_assert (!(ds & DEP_OUTPUT));
4592       if (ds & DEP_ANTI)
4593 	gcc_assert (ds & BEGIN_CONTROL);
4594     }
4595 }
4596 
4597 /* The following code discovers opportunities to switch a memory reference
4598    and an increment by modifying the address.  We ensure that this is done
4599    only for dependencies that are only used to show a single register
4600    dependence (using DEP_NONREG and DEP_MULTIPLE), and so that every memory
4601    instruction involved is subject to only one dep that can cause a pattern
4602    change.
4603 
4604    When we discover a suitable dependency, we fill in the dep_replacement
4605    structure to show how to modify the memory reference.  */
4606 
4607 /* Holds information about a pair of memory reference and register increment
4608    insns which depend on each other, but could possibly be interchanged.  */
4609 struct mem_inc_info
4610 {
4611   rtx_insn *inc_insn;
4612   rtx_insn *mem_insn;
4613 
4614   rtx *mem_loc;
4615   /* A register occurring in the memory address for which we wish to break
4616      the dependence.  This must be identical to the destination register of
4617      the increment.  */
4618   rtx mem_reg0;
4619   /* Any kind of index that is added to that register.  */
4620   rtx mem_index;
4621   /* The constant offset used in the memory address.  */
4622   HOST_WIDE_INT mem_constant;
4623   /* The constant added in the increment insn.  Negated if the increment is
4624      after the memory address.  */
4625   HOST_WIDE_INT inc_constant;
4626   /* The source register used in the increment.  May be different from mem_reg0
4627      if the increment occurs before the memory address.  */
4628   rtx inc_input;
4629 };
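
/* An illustrative pair the code below is meant to recognize (register
   numbers hypothetical):

       inc_insn:  (set (reg 100) (plus (reg 100) (const_int 4)))
       mem_insn:  (set (reg 200) (mem (plus (reg 100) (const_int 8))))

   Here mem_reg0 is r100, mem_index is NULL_RTX, mem_constant is 8 and
   inc_constant is 4.  Breaking the dependence rewrites the memory
   address to (plus (reg 100) (const_int 12)), which is what the load
   must use if it is scheduled ahead of the increment.  */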
4630 
4631 /* Verify that the memory location described in MII can be replaced with
4632    one using NEW_ADDR.  Return the new memory reference or NULL_RTX.  The
4633    insn remains unchanged by this function.  */
4634 
4635 static rtx
4636 attempt_change (struct mem_inc_info *mii, rtx new_addr)
4637 {
4638   rtx mem = *mii->mem_loc;
4639   rtx new_mem;
4640 
4641   /* Jump through a lot of hoops to keep the attributes up to date.  We
4642      do not want to call one of the change address variants that take
4643      an offset even though we know the offset in many cases.  These
4644      assume you are changing where the address is pointing by the
4645      offset.  */
4646   new_mem = replace_equiv_address_nv (mem, new_addr);
4647   if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
4648     {
4649       if (sched_verbose >= 5)
4650 	fprintf (sched_dump, "validation failure\n");
4651       return NULL_RTX;
4652     }
4653 
4654   /* Put back the old one.  */
4655   validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
4656 
4657   return new_mem;
4658 }
4659 
4660 /* Return true if INSN has the form "a = b + c" where a and b are
4661    regs and c is a constant.  Fill in information in MII about what
4662    is found.
4663    BEFORE_MEM indicates whether the increment is found before or after
4664    a corresponding memory reference.  */
4665 
4666 static bool
4667 parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
4668 {
4669   rtx pat = single_set (insn);
4670   rtx src, cst;
4671   bool regs_equal;
4672 
4673   if (RTX_FRAME_RELATED_P (insn) || !pat)
4674     return false;
4675 
4676   /* Result must be single reg.  */
4677   if (!REG_P (SET_DEST (pat)))
4678     return false;
4679 
4680   if (GET_CODE (SET_SRC (pat)) != PLUS)
4681     return false;
4682 
4683   mii->inc_insn = insn;
4684   src = SET_SRC (pat);
4685   mii->inc_input = XEXP (src, 0);
4686 
4687   if (!REG_P (XEXP (src, 0)))
4688     return false;
4689 
4690   if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
4691     return false;
4692 
4693   cst = XEXP (src, 1);
4694   if (!CONST_INT_P (cst))
4695     return false;
4696   mii->inc_constant = INTVAL (cst);
4697 
4698   regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);
4699 
4700   if (!before_mem)
4701     {
4702       mii->inc_constant = -mii->inc_constant;
4703       if (!regs_equal)
4704 	return false;
4705     }
4706 
4707   if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
4708     {
4709       /* Note that the sign has already been reversed for !before_mem.  */
4710       if (STACK_GROWS_DOWNWARD)
4711 	return mii->inc_constant > 0;
4712       else
4713 	return mii->inc_constant < 0;
4714     }
4715   return true;
4716 }
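
/* Shapes the parser above accepts (illustrative):

       a = a + 4   valid whether the increment precedes or follows the
		   mem insn (the constant is negated in the latter case);
       a = b + 4   valid only when the increment precedes the mem insn,
		   since no constant adjustment can stand in for the
		   different source register afterwards.

   Stack pointer adjustments are additionally screened by sign so that
   the rewritten memory reference can never end up on the unallocated
   side of the stack pointer at its new position.  */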
4717 
4718 /* Once a suitable mem reference has been found and the corresponding data
4719    in MII has been filled in, this function is called to find a suitable
4720    add or inc insn involving the register we found in the memory
4721    reference.  */
4722 
4723 static bool
4724 find_inc (struct mem_inc_info *mii, bool backwards)
4725 {
4726   sd_iterator_def sd_it;
4727   dep_t dep;
4728 
4729   sd_it = sd_iterator_start (mii->mem_insn,
4730 			     backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
4731   while (sd_iterator_cond (&sd_it, &dep))
4732     {
4733       dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
4734       rtx_insn *pro = DEP_PRO (dep);
4735       rtx_insn *con = DEP_CON (dep);
4736       rtx_insn *inc_cand = backwards ? pro : con;
4737       if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
4738 	goto next;
4739       if (parse_add_or_inc (mii, inc_cand, backwards))
4740 	{
4741 	  struct dep_replacement *desc;
4742 	  df_ref def;
4743 	  rtx newaddr, newmem;
4744 
4745 	  if (sched_verbose >= 5)
4746 	    fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
4747 		     INSN_UID (mii->mem_insn), INSN_UID (inc_cand));
4748 
4749 	  /* Need to ensure that none of the operands of the inc
4750 	     instruction are assigned to by the mem insn.  */
4751 	  FOR_EACH_INSN_DEF (def, mii->mem_insn)
4752 	    if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
4753 		|| reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
4754 	      {
4755 		if (sched_verbose >= 5)
4756 		  fprintf (sched_dump,
4757 			   "inc conflicts with store failure.\n");
4758 		goto next;
4759 	      }
4760 
4761 	  newaddr = mii->inc_input;
4762 	  if (mii->mem_index != NULL_RTX)
4763 	    newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
4764 				    mii->mem_index);
4765 	  newaddr = plus_constant (GET_MODE (newaddr), newaddr,
4766 				   mii->mem_constant + mii->inc_constant);
4767 	  newmem = attempt_change (mii, newaddr);
4768 	  if (newmem == NULL_RTX)
4769 	    goto next;
4770 	  if (sched_verbose >= 5)
4771 	    fprintf (sched_dump, "successful address replacement\n");
4772 	  desc = XCNEW (struct dep_replacement);
4773 	  DEP_REPLACE (dep) = desc;
4774 	  desc->loc = mii->mem_loc;
4775 	  desc->newval = newmem;
4776 	  desc->orig = *desc->loc;
4777 	  desc->insn = mii->mem_insn;
4778 	  move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
4779 			 INSN_SPEC_BACK_DEPS (con));
4780 	  if (backwards)
4781 	    {
4782 	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
4783 		add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
4784 				  REG_DEP_TRUE);
4785 	    }
4786 	  else
4787 	    {
4788 	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
4789 		add_dependence_1 (DEP_CON (dep), mii->mem_insn,
4790 				  REG_DEP_ANTI);
4791 	    }
4792 	  return true;
4793 	}
4794     next:
4795       sd_iterator_next (&sd_it);
4796     }
4797   return false;
4798 }
4799 
4800 /* A recursive function that walks ADDRESS_OF_X to find memory references
4801    which could be modified during scheduling.  We call find_inc for each
4802    one we find that has a recognizable form.  MII holds information about
4803    the pair of memory/increment instructions.
4804    We ensure that every instruction with a memory reference (which will be
4805    the location of the replacement) is assigned at most one breakable
4806    dependency.  */
4807 
4808 static bool
4809 find_mem (struct mem_inc_info *mii, rtx *address_of_x)
4810 {
4811   rtx x = *address_of_x;
4812   enum rtx_code code = GET_CODE (x);
4813   const char *const fmt = GET_RTX_FORMAT (code);
4814   int i;
4815 
4816   if (code == MEM)
4817     {
4818       rtx reg0 = XEXP (x, 0);
4819 
4820       mii->mem_loc = address_of_x;
4821       mii->mem_index = NULL_RTX;
4822       mii->mem_constant = 0;
4823       if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
4824 	{
4825 	  mii->mem_constant = INTVAL (XEXP (reg0, 1));
4826 	  reg0 = XEXP (reg0, 0);
4827 	}
4828       if (GET_CODE (reg0) == PLUS)
4829 	{
4830 	  mii->mem_index = XEXP (reg0, 1);
4831 	  reg0 = XEXP (reg0, 0);
4832 	}
4833       if (REG_P (reg0))
4834 	{
4835 	  df_ref use;
4836 	  int occurrences = 0;
4837 
4838 	  /* Make sure this reg appears only once in this insn.  Can't use
4839 	     count_occurrences since that only works for pseudos.  */
4840 	  FOR_EACH_INSN_USE (use, mii->mem_insn)
4841 	    if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
4842 	      if (++occurrences > 1)
4843 		{
4844 		  if (sched_verbose >= 5)
4845 		    fprintf (sched_dump, "mem count failure\n");
4846 		  return false;
4847 		}
4848 
4849 	  mii->mem_reg0 = reg0;
4850 	  return find_inc (mii, true) || find_inc (mii, false);
4851 	}
4852       return false;
4853     }
4854 
4855   if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
4856     {
4857       /* If REG occurs inside a MEM used in a bit-field reference,
4858 	 that is unacceptable.  */
4859       return false;
4860     }
4861 
4862   /* Time for some deep diving.  */
4863   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4864     {
4865       if (fmt[i] == 'e')
4866 	{
4867 	  if (find_mem (mii, &XEXP (x, i)))
4868 	    return true;
4869 	}
4870       else if (fmt[i] == 'E')
4871 	{
4872 	  int j;
4873 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4874 	    if (find_mem (mii, &XVECEXP (x, i, j)))
4875 	      return true;
4876 	}
4877     }
4878   return false;
4879 }
4880 
4881 
4882 /* Examine the instructions between HEAD and TAIL and try to find
4883    dependencies that can be broken by modifying one of the patterns.  */
4884 
4885 void
4886 find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
4887 {
4888   rtx_insn *insn, *next_tail = NEXT_INSN (tail);
4889   int success_in_block = 0;
4890 
4891   for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
4892     {
4893       struct mem_inc_info mii;
4894 
4895       if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
4896 	continue;
4897 
4898       mii.mem_insn = insn;
4899       if (find_mem (&mii, &PATTERN (insn)))
4900 	success_in_block++;
4901     }
4902   if (success_in_block && sched_verbose >= 5)
4903     fprintf (sched_dump, "%d candidates for address modification found.\n",
4904 	     success_in_block);
4905 }
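
/* Hedged usage sketch: a scheduler runs this over a block before
   scheduling it,

       find_modifiable_mems (bb_head, bb_tail);

   after which any dependence whose DEP_REPLACE is non-NULL can be
   broken during scheduling by validating DESC->newval into *DESC->loc,
   and undone again by putting back DESC->orig.  */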
4906 
4907 #endif /* INSN_SCHEDULING */
4908