1 /* Instruction scheduling pass.
2    Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3    2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
4    Free Software Foundation, Inc.
5    Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
6    and currently maintained by, Jim Wilson (wilson@cygnus.com).
7 
8 This file is part of GCC.
9 
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14 
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
18 for more details.
19 
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3.  If not see
22 <http://www.gnu.org/licenses/>.  */
23 
24 /* Instruction scheduling pass.  This file, along with sched-deps.c,
25    contains the generic parts.  The entry point for the normal
26    instruction scheduling pass is found in sched-rgn.c.
27 
28    We compute insn priorities based on data dependencies.  Flow
29    analysis only creates a fraction of the data dependencies we must
30    observe: namely, only those dependencies which the combiner can be
31    expected to use.  For this pass, we must therefore create the
32    remaining dependencies ourselves: register dependencies, memory
33    dependencies, dependencies to keep function calls in order, and the
34    dependence between a conditional branch and the setting of condition
35    codes are all dealt with here.
36 
37    The scheduler first traverses the data flow graph, starting with
38    the last instruction, and proceeding to the first, assigning values
39    to insn_priority as it goes.  This sorts the instructions
40    topologically by data dependence.
41 
42    Once priorities have been established, we order the insns using
43    list scheduling.  This works as follows: starting with a list of
44    all the ready insns, and sorted according to priority number, we
45    schedule the insn from the end of the list by placing its
46    predecessors in the list according to their priority order.  We
47    consider this insn scheduled by setting the pointer to the "end" of
48    the list to point to the previous insn.  When an insn has no
49    predecessors, we either queue it until sufficient time has elapsed
50    or add it to the ready list.  As the instructions are scheduled or
51    when stalls are introduced, the queue advances and dumps insns into
52    the ready list.  When all insns down to the lowest priority have
53    been scheduled, the critical path of the basic block has been made
54    as short as possible.  The remaining insns are then scheduled in
55    remaining slots.
56 
57    The following list shows the order in which we want to break ties
58    among insns in the ready list:
59 
60    1.  choose insn with the longest path to end of bb, ties
61    broken by
62    2.  choose insn with least contribution to register pressure,
63    ties broken by
64    3.  prefer in-block over interblock motion, ties broken by
65    4.  prefer useful over speculative motion, ties broken by
66    5.  choose insn with largest control flow probability, ties
67    broken by
68    6.  choose insn with the least dependences upon the previously
69    scheduled insn, ties broken by
70    7.  choose the insn which has the most insns dependent on it, or finally
71    8.  choose insn with lowest UID.
72 
73    Memory references complicate matters.  Only if we can be certain
74    that memory references are not part of the data dependency graph
75    (via true, anti, or output dependence), can we move operations past
76    memory references.  To a first approximation, reads can be done
77    independently, while writes introduce dependencies.  Better
78    approximations will yield fewer dependencies.
79 
80    Before reload, an extended analysis of interblock data dependences
81    is required for interblock scheduling.  This is performed in
82    compute_block_backward_dependences ().
83 
84    Dependencies set up by memory references are treated in exactly the
85    same way as other dependencies, by using insn backward dependences
86    INSN_BACK_DEPS.  INSN_BACK_DEPS are translated into forward dependences
87    INSN_FORW_DEPS for the purpose of forward list scheduling.
88 
89    Having optimized the critical path, we may have also unduly
90    extended the lifetimes of some registers.  If an operation requires
91    that constants be loaded into registers, it is certainly desirable
92    to load those constants as early as necessary, but no earlier.
93    I.e., it will not do to load up a bunch of registers at the
94    beginning of a basic block only to use them at the end, if they
95    could be loaded later, since this may result in excessive register
96    utilization.
97 
98    Note that since branches never appear within basic blocks, but only
99    end them, this pass will not move branches.  But that is ok,
100    since we can use GNU's delayed branch scheduling pass to take care
101    of this case.
102 
103    Also note that no further optimizations based on algebraic
104    identities are performed, so this pass would be a good one to
105    perform instruction splitting, such as breaking up a multiply
106    instruction into shifts and adds where that is profitable.
107 
108    Given the memory aliasing analysis that this pass should perform,
109    it should be possible to remove redundant stores to memory, and to
110    load values from registers instead of hitting memory.
111 
112    Before reload, speculative insns are moved only if a 'proof' exists
113    that no exception will be caused by this, and if no live registers
114    exist that inhibit the motion (live register constraints are not
115    represented by data dependence edges).
116 
117    This pass must update information that subsequent passes expect to
118    be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
119    reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.
120 
121    The information in the line number notes is carefully retained by
122    this pass.  Notes that refer to the starting and ending of
123    exception regions are also carefully retained by this pass.  All
124    other NOTE insns are grouped in their same relative order at the
125    beginning of basic blocks and regions that have been scheduled.  */
126 
127 #include "config.h"
128 #include "system.h"
129 #include "coretypes.h"
130 #include "tm.h"
131 #include "diagnostic-core.h"
132 #include "hard-reg-set.h"
133 #include "rtl.h"
134 #include "tm_p.h"
135 #include "regs.h"
136 #include "function.h"
137 #include "flags.h"
138 #include "insn-config.h"
139 #include "insn-attr.h"
140 #include "except.h"
141 #include "recog.h"
142 #include "sched-int.h"
143 #include "target.h"
144 #include "common/common-target.h"
145 #include "output.h"
146 #include "params.h"
147 #include "vecprim.h"
148 #include "dbgcnt.h"
149 #include "cfgloop.h"
150 #include "ira.h"
151 #include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
152 #include "hashtab.h"
153 
154 #ifdef INSN_SCHEDULING
155 
156 /* issue_rate is the number of insns that can be scheduled in the same
157    machine cycle.  It can be defined in the config/mach/mach.h file;
158    otherwise we set it to 1.  */
159 
160 int issue_rate;
161 
162 /* This can be set to true by a backend if the scheduler should not
163    enable a DCE pass.  */
164 bool sched_no_dce;
165 
166 /* The current initiation interval used when modulo scheduling.  */
167 static int modulo_ii;
168 
169 /* The maximum number of stages we are prepared to handle.  */
170 static int modulo_max_stages;
171 
172 /* The number of insns that exist in each iteration of the loop.  We use this
173    to detect when we've scheduled all insns from the first iteration.  */
174 static int modulo_n_insns;
175 
176 /* The current count of insns in the first iteration of the loop that have
177    already been scheduled.  */
178 static int modulo_insns_scheduled;
179 
180 /* The maximum uid of insns from the first iteration of the loop.  */
181 static int modulo_iter0_max_uid;
182 
183 /* The number of times we should attempt to backtrack when modulo scheduling.
184    Decreased each time we have to backtrack.  */
185 static int modulo_backtracks_left;
186 
187 /* The stage in which the last insn from the original loop was
188    scheduled.  */
189 static int modulo_last_stage;
190 
191 /* sched-verbose controls the amount of debugging output the
192    scheduler prints.  It is controlled by -fsched-verbose=N:
193    N>0 and no -DSR : the output is directed to stderr.
194    N>=10 will direct the printouts to stderr (regardless of -dSR).
195    N=1: same as -dSR.
196    N=2: bb's probabilities, detailed ready list info, unit/insn info.
197    N=3: rtl at abort point, control-flow, regions info.
198    N=5: dependences info.  */
199 
200 int sched_verbose = 0;
201 
202 /* Debugging file.  All printouts are sent to dump, which is always set,
203    either to stderr, or to the dump listing file (-dSR).  */
204 FILE *sched_dump = 0;
205 
206 /* This is a placeholder for the scheduler parameters common
207    to all schedulers.  */
208 struct common_sched_info_def *common_sched_info;
209 
210 #define INSN_TICK(INSN)	(HID (INSN)->tick)
211 #define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
212 #define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
213 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
214 #define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
215 #define SHADOW_P(INSN) (HID (INSN)->shadow_p)
216 #define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
217 
218 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
219    then it should be recalculated from scratch.  */
220 #define INVALID_TICK (-(max_insn_queue_index + 1))
221 /* The minimal value of the INSN_TICK of an instruction.  */
222 #define MIN_TICK (-max_insn_queue_index)
223 
224 /* List of important notes we must keep around.  This is a pointer to the
225    last element in the list.  */
226 rtx note_list;
227 
228 static struct spec_info_def spec_info_var;
229 /* Description of the speculative part of the scheduling.
230    If NULL - no speculation.  */
231 spec_info_t spec_info = NULL;
232 
233 /* True if a recovery block was added during scheduling of the current
234    block.  Used to determine whether we need to fix INSN_TICKs.  */
235 static bool haifa_recovery_bb_recently_added_p;
236 
237 /* True if a recovery block was added during this scheduling pass.
238    Used to determine whether the memory pools of dependencies should be
239    empty after finishing the current region.  */
240 bool haifa_recovery_bb_ever_added_p;
241 
242 /* Counters of different types of speculative instructions.  */
243 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
244 
245 /* Array used in {unlink, restore}_bb_notes.  */
246 static rtx *bb_header = 0;
247 
248 /* Basic block after which recovery blocks will be created.  */
249 static basic_block before_recovery;
250 
251 /* Basic block just before the EXIT_BLOCK and after recovery, if we have
252    created it.  */
253 basic_block after_recovery;
254 
255 /* FALSE if we are adding a bb to another region, so we need not initialize it.  */
256 bool adding_bb_to_current_region_p = true;
257 
258 /* Queues, etc.  */
259 
260 /* An instruction is ready to be scheduled when all insns preceding it
261    have already been scheduled.  It is important to ensure that all
262    insns which use its result will not be executed until its result
263    has been computed.  An insn is maintained in one of four structures:
264 
265    (P) the "Pending" set of insns which cannot be scheduled until
266    their dependencies have been satisfied.
267    (Q) the "Queued" set of insns that can be scheduled when sufficient
268    time has passed.
269    (R) the "Ready" list of unscheduled, uncommitted insns.
270    (S) the "Scheduled" list of insns.
271 
272    Initially, all insns are either "Pending" or "Ready" depending on
273    whether their dependencies are satisfied.
274 
275    Insns move from the "Ready" list to the "Scheduled" list as they
276    are committed to the schedule.  As this occurs, the insns in the
277    "Pending" list have their dependencies satisfied and move to either
278    the "Ready" list or the "Queued" set depending on whether
279    sufficient time has passed to make them ready.  As time passes,
280    insns move from the "Queued" set to the "Ready" list.
281 
282    The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
283    unscheduled insns, i.e., those that are ready, queued, and pending.
284    The "Queued" set (Q) is implemented by the variable `insn_queue'.
285    The "Ready" list (R) is implemented by the variables `ready' and
286    `n_ready'.
287    The "Scheduled" list (S) is the new insn chain built by this pass.
288 
289    The transition (R->S) is implemented in the scheduling loop in
290    `schedule_block' when the best insn to schedule is chosen.
291    The transitions (P->R and P->Q) are implemented in `schedule_insn' as
292    insns move from the ready list to the scheduled list.
293    The transition (Q->R) is implemented in 'queue_to_insn' as time
294    passes or stalls are introduced.  */
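/* A minimal, self-contained sketch of the Q->R->S flow described above
   (hypothetical names, illustrative only; the pending set and real
   dependence tracking are omitted): each cycle, insns whose delay has
   expired become ready, and the highest-priority ready insn is committed
   to the schedule.  Kept under #if 0 so it is never built.  */
#if 0
struct toy_insn
{
  int priority;
  int ready_cycle;	/* First cycle at which the insn may issue (Q->R).  */
  int scheduled;	/* Nonzero once the insn has moved R->S.  */
};

static void
toy_list_schedule (struct toy_insn *insns, int n_insns)
{
  int clock = 0, n_done = 0;

  while (n_done < n_insns)
    {
      int i, best = -1;

      /* Q->R: an insn is ready once CLOCK reaches its ready cycle.
	 R->S: pick the highest-priority ready insn.  */
      for (i = 0; i < n_insns; i++)
	if (!insns[i].scheduled
	    && insns[i].ready_cycle <= clock
	    && (best < 0 || insns[i].priority > insns[best].priority))
	  best = i;

      if (best >= 0)
	{
	  insns[best].scheduled = 1;	/* R->S.  */
	  n_done++;
	}
      else
	clock++;	/* Stall: advance the clock to drain the queue.  */
    }
}
#endif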
295 
296 /* Implement a circular buffer to delay instructions until sufficient
297    time has passed.  For the new pipeline description interface,
298    MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
299    than the maximal instruction latency computed by genattr.c from the
300    maximal time of the functional unit reservations.  This is the
301    longest time an insn may be queued.  */
302 
303 static rtx *insn_queue;
304 static int q_ptr = 0;
305 static int q_size = 0;
306 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
307 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
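/* For example, with max_insn_queue_index == 7 the buffer has eight slots,
   and NEXT_Q_AFTER (6, 3) == (6 + 3) & 7 == 1: the index wraps around the
   circular buffer instead of running past its end.  */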
308 
309 #define QUEUE_SCHEDULED (-3)
310 #define QUEUE_NOWHERE   (-2)
311 #define QUEUE_READY     (-1)
312 /* QUEUE_SCHEDULED - INSN is scheduled.
313    QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in the
314    queue nor in the ready list.
315    QUEUE_READY     - INSN is in ready list.
316    N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */
317 
318 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
319 
320 /* The following variable describes all current and future
321    reservations of the processor units.  */
322 state_t curr_state;
323 
324 /* The following variable is the size of the memory representing all
325    current and future reservations of the processor units.  */
326 size_t dfa_state_size;
327 
328 /* The following array is used to find the best insn from ready when
329    the automaton pipeline interface is used.  */
330 char *ready_try = NULL;
331 
332 /* The ready list.  */
333 struct ready_list ready = {NULL, 0, 0, 0, 0};
334 
335 /* The pointer to the ready list (to be removed).  */
336 static struct ready_list *readyp = &ready;
337 
338 /* Scheduling clock.  */
339 static int clock_var;
340 
341 /* Clock at which the previous instruction was issued.  */
342 static int last_clock_var;
343 
344 /* Set to true if, when queuing a shadow insn, we discover that it would be
345    scheduled too late.  */
346 static bool must_backtrack;
347 
348 /* The following variable is the number of essential insns issued on
349    the current cycle.  An insn is essential if it changes the
350    processor state.  */
351 int cycle_issued_insns;
352 
353 /* This records the actual schedule.  It is built up during the main phase
354    of schedule_block, and afterwards used to reorder the insns in the RTL.  */
355 static VEC(rtx, heap) *scheduled_insns;
356 
357 static int may_trap_exp (const_rtx, int);
358 
359 /* Nonzero iff the address is composed of at most one register.  */
360 #define CONST_BASED_ADDRESS_P(x)			\
361   (REG_P (x)					\
362    || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
363 	|| (GET_CODE (x) == LO_SUM))			\
364        && (CONSTANT_P (XEXP (x, 0))			\
365 	   || CONSTANT_P (XEXP (x, 1)))))
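/* For example, (reg R), (plus (reg R) (const_int 4)) and
   (lo_sum (reg R) (symbol_ref "s")) all satisfy this predicate.  */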
366 
369 
370 
371 static int haifa_luid_for_non_insn (rtx x);
372 
373 /* Haifa version of sched_info hooks common to all headers.  */
374 const struct common_sched_info_def haifa_common_sched_info =
375   {
376     NULL, /* fix_recovery_cfg */
377     NULL, /* add_block */
378     NULL, /* estimate_number_of_insns */
379     haifa_luid_for_non_insn, /* luid_for_non_insn */
380     SCHED_PASS_UNKNOWN /* sched_pass_id */
381   };
382 
383 /* Mapping from instruction UID to its Logical UID.  */
384 VEC (int, heap) *sched_luids = NULL;
385 
386 /* Next LUID to assign to an instruction.  */
387 int sched_max_luid = 1;
388 
389 /* Haifa Instruction Data.  */
390 VEC (haifa_insn_data_def, heap) *h_i_d = NULL;
391 
392 void (* sched_init_only_bb) (basic_block, basic_block);
393 
394 /* Split block function.  Different schedulers might use different functions
395    to keep their internal data consistent.  */
396 basic_block (* sched_split_block) (basic_block, rtx);
397 
398 /* Create empty basic block after the specified block.  */
399 basic_block (* sched_create_empty_bb) (basic_block);
400 
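/* Classify rtx X for trap riskiness.  IS_STORE is nonzero when X appears
   as a store destination.  Returns one of TRAP_FREE, TRAP_RISKY, IFREE,
   IRISKY, PFREE_CANDIDATE or PRISKY_CANDIDATE; see the comment above
   haifa_classify_rtx below for their meaning.  */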
401 static int
402 may_trap_exp (const_rtx x, int is_store)
403 {
404   enum rtx_code code;
405 
406   if (x == 0)
407     return TRAP_FREE;
408   code = GET_CODE (x);
409   if (is_store)
410     {
411       if (code == MEM && may_trap_p (x))
412 	return TRAP_RISKY;
413       else
414 	return TRAP_FREE;
415     }
416   if (code == MEM)
417     {
418       /* The insn uses memory:  a volatile load.  */
419       if (MEM_VOLATILE_P (x))
420 	return IRISKY;
421       /* An exception-free load.  */
422       if (!may_trap_p (x))
423 	return IFREE;
424       /* A load with 1 base register, to be further checked.  */
425       if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
426 	return PFREE_CANDIDATE;
427       /* No info on the load, to be further checked.  */
428       return PRISKY_CANDIDATE;
429     }
430   else
431     {
432       const char *fmt;
433       int i, insn_class = TRAP_FREE;
434 
435       /* Neither store nor load, check if it may cause a trap.  */
436       if (may_trap_p (x))
437 	return TRAP_RISKY;
438       /* Recursive step: walk the insn...  */
439       fmt = GET_RTX_FORMAT (code);
440       for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
441 	{
442 	  if (fmt[i] == 'e')
443 	    {
444 	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
445 	      insn_class = WORST_CLASS (insn_class, tmp_class);
446 	    }
447 	  else if (fmt[i] == 'E')
448 	    {
449 	      int j;
450 	      for (j = 0; j < XVECLEN (x, i); j++)
451 		{
452 		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
453 		  insn_class = WORST_CLASS (insn_class, tmp_class);
454 		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
455 		    break;
456 		}
457 	    }
458 	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
459 	    break;
460 	}
461       return insn_class;
462     }
463 }
464 
465 /* Classifies rtx X of an insn for the purpose of verifying that X can be
466    executed speculatively (and consequently the insn can be moved
467    speculatively), by examining X, returning:
468    TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
469    TRAP_FREE: non-load insn.
470    IFREE: load from a globally safe location.
471    IRISKY: volatile load.
472    PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
473    being either PFREE or PRISKY.  */
474 
475 static int
476 haifa_classify_rtx (const_rtx x)
477 {
478   int tmp_class = TRAP_FREE;
479   int insn_class = TRAP_FREE;
480   enum rtx_code code;
481 
482   if (GET_CODE (x) == PARALLEL)
483     {
484       int i, len = XVECLEN (x, 0);
485 
486       for (i = len - 1; i >= 0; i--)
487 	{
488 	  tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
489 	  insn_class = WORST_CLASS (insn_class, tmp_class);
490 	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
491 	    break;
492 	}
493     }
494   else
495     {
496       code = GET_CODE (x);
497       switch (code)
498 	{
499 	case CLOBBER:
500 	  /* Test if it is a 'store'.  */
501 	  tmp_class = may_trap_exp (XEXP (x, 0), 1);
502 	  break;
503 	case SET:
504 	  /* Test if it is a store.  */
505 	  tmp_class = may_trap_exp (SET_DEST (x), 1);
506 	  if (tmp_class == TRAP_RISKY)
507 	    break;
508 	  /* Test if it is a load.  */
509 	  tmp_class =
510 	    WORST_CLASS (tmp_class,
511 			 may_trap_exp (SET_SRC (x), 0));
512 	  break;
513 	case COND_EXEC:
514 	  tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
515 	  if (tmp_class == TRAP_RISKY)
516 	    break;
517 	  tmp_class = WORST_CLASS (tmp_class,
518 				   may_trap_exp (COND_EXEC_TEST (x), 0));
519 	  break;
520 	case TRAP_IF:
521 	  tmp_class = TRAP_RISKY;
522 	  break;
523 	default:;
524 	}
525       insn_class = tmp_class;
526     }
527 
528   return insn_class;
529 }
530 
531 int
532 haifa_classify_insn (const_rtx insn)
533 {
534   return haifa_classify_rtx (PATTERN (insn));
535 }
536 
537 /* After the scheduler initialization function has been called, this function
538    can be called to enable modulo scheduling.  II is the initiation interval
539    we should use; it affects the delays for delay_pairs that were recorded as
540    separated by a given number of stages.
541 
542    MAX_STAGES provides us with a limit
543    after which we give up scheduling; the caller must have unrolled at least
544    as many copies of the loop body and recorded delay_pairs for them.
545 
546    INSNS is the number of real (non-debug) insns in one iteration of
547    the loop.  MAX_UID can be used to test whether an insn belongs to
548    the first iteration of the loop; all of them have a uid lower than
549    MAX_UID.  */
550 void
551 set_modulo_params (int ii, int max_stages, int insns, int max_uid)
552 {
553   modulo_ii = ii;
554   modulo_max_stages = max_stages;
555   modulo_n_insns = insns;
556   modulo_iter0_max_uid = max_uid;
557   modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS);
558 }
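/* A hypothetical example (all values illustrative only): a driver that has
   unrolled three copies of a loop body containing four real insns, and
   wants an initiation interval of two cycles, might enable the machinery
   above with the call below.  Kept under #if 0 so it is never built.  */
#if 0
  set_modulo_params (2 /* ii */, 3 /* max_stages */,
		     4 /* insns per iteration */,
		     first_iteration_max_uid /* hypothetical variable */);
#endif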
559 
560 /* A structure to record a pair of insns where the first one is a real
561    insn that has delay slots, and the second is its delayed shadow.
562    I1 is scheduled normally and will emit an assembly instruction,
563    while I2 describes the side effect that takes place at the
564    transition between cycles CYCLES and (CYCLES + 1) after I1.  */
565 struct delay_pair
566 {
567   struct delay_pair *next_same_i1;
568   rtx i1, i2;
569   int cycles;
570   /* When doing modulo scheduling, a delay_pair can also be used to
571      show that I1 and I2 are the same insn in a different stage.  If that
572      is the case, STAGES will be nonzero.  */
573   int stages;
574 };
575 
576 /* Two hash tables to record delay_pairs, one indexed by I1 and the other
577    indexed by I2.  */
578 static htab_t delay_htab;
579 static htab_t delay_htab_i2;
580 
581 /* Called through htab_traverse.  Walk the hashtable using I2 as
582    index, and delete all elements involving a UID higher than
583    that pointed to by *DATA.  */
584 static int
585 htab_i2_traverse (void **slot, void *data)
586 {
587   int maxuid = *(int *)data;
588   struct delay_pair *p = *(struct delay_pair **)slot;
589   if (INSN_UID (p->i2) >= maxuid || INSN_UID (p->i1) >= maxuid)
590     {
591       htab_clear_slot (delay_htab_i2, slot);
592     }
593   return 1;
594 }
595 
596 /* Called through htab_traverse.  Walk the hashtable using I1 as
597    index, and delete all elements involving a UID higher than
598    that pointed to by *DATA.  */
599 static int
600 htab_i1_traverse (void **slot, void *data)
601 {
602   int maxuid = *(int *)data;
603   struct delay_pair **pslot = (struct delay_pair **)slot;
604   struct delay_pair *p, *first, **pprev;
605 
606   if (INSN_UID ((*pslot)->i1) >= maxuid)
607     {
608       htab_clear_slot (delay_htab, slot);
609       return 1;
610     }
611   pprev = &first;
612   for (p = *pslot; p; p = p->next_same_i1)
613     {
614       if (INSN_UID (p->i2) < maxuid)
615 	{
616 	  *pprev = p;
617 	  pprev = &p->next_same_i1;
618 	}
619     }
620   *pprev = NULL;
621   if (first == NULL)
622     htab_clear_slot (delay_htab, slot);
623   else
624     *pslot = first;
625   return 1;
626 }
627 
628 /* Discard all delay pairs which involve an insn with a UID higher
629    than MAX_UID.  */
630 void
631 discard_delay_pairs_above (int max_uid)
632 {
633   htab_traverse (delay_htab, htab_i1_traverse, &max_uid);
634   htab_traverse (delay_htab_i2, htab_i2_traverse, &max_uid);
635 }
636 
637 /* Returns a hash value for X (which really is a delay_pair), based on
638    hashing just I1.  */
639 static hashval_t
640 delay_hash_i1 (const void *x)
641 {
642   return htab_hash_pointer (((const struct delay_pair *) x)->i1);
643 }
644 
645 /* Returns a hash value for X (which really is a delay_pair), based on
646    hashing just I2.  */
647 static hashval_t
648 delay_hash_i2 (const void *x)
649 {
650   return htab_hash_pointer (((const struct delay_pair *) x)->i2);
651 }
652 
653 /* Return nonzero if I1 of delay_pair X is the same as insn Y.  */
654 static int
655 delay_i1_eq (const void *x, const void *y)
656 {
657   return ((const struct delay_pair *) x)->i1 == y;
658 }
659 
660 /* Return nonzero if I2 of delay_pair X is the same as insn Y.  */
661 static int
662 delay_i2_eq (const void *x, const void *y)
663 {
664   return ((const struct delay_pair *) x)->i2 == y;
665 }
666 
667 /* This function can be called by a port just before it starts the final
668    scheduling pass.  It records the fact that an instruction with delay
669    slots has been split into two insns, I1 and I2.  The first one will be
670    scheduled normally and initiates the operation.  The second one is a
671    shadow which must follow a specific number of cycles after I1; its only
672    purpose is to show the side effect that occurs at that cycle in the RTL.
673    If a JUMP_INSN or a CALL_INSN has been split, I1 should be a normal INSN,
674    while I2 retains the original insn type.
675 
676    There are two ways in which the number of cycles can be specified,
677    involving the CYCLES and STAGES arguments to this function.  If STAGES
678    is zero, we just use the value of CYCLES.  Otherwise, STAGES is a factor
679    which is multiplied by MODULO_II to give the number of cycles.  This is
680    only useful if the caller also calls set_modulo_params to enable modulo
681    scheduling.  */
682 
683 void
684 record_delay_slot_pair (rtx i1, rtx i2, int cycles, int stages)
685 {
686   struct delay_pair *p = XNEW (struct delay_pair);
687   struct delay_pair **slot;
688 
689   p->i1 = i1;
690   p->i2 = i2;
691   p->cycles = cycles;
692   p->stages = stages;
693 
694   if (!delay_htab)
695     {
696       delay_htab = htab_create (10, delay_hash_i1, delay_i1_eq, NULL);
697       delay_htab_i2 = htab_create (10, delay_hash_i2, delay_i2_eq, free);
698     }
699   slot = ((struct delay_pair **)
700 	  htab_find_slot_with_hash (delay_htab, i1, htab_hash_pointer (i1),
701 				    INSERT));
702   p->next_same_i1 = *slot;
703   *slot = p;
704   slot = ((struct delay_pair **)
705 	  htab_find_slot_with_hash (delay_htab_i2, i2, htab_hash_pointer (i2),
706 				    INSERT));
707   *slot = p;
708 }
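/* A hypothetical example of how a port might use this interface: after
   splitting an insn whose side effect completes two cycles after issue
   into a normal insn I1 and its shadow I2, the port would record (kept
   under #if 0; I1 and I2 are placeholders):  */
#if 0
  record_delay_slot_pair (i1, i2, 2 /* cycles */, 0 /* stages */);
#endif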
709 
710 /* Examine the delay pair hashtable to see if INSN is a shadow for another,
711    and return the other insn if so.  Return NULL otherwise.  */
712 rtx
713 real_insn_for_shadow (rtx insn)
714 {
715   struct delay_pair *pair;
716 
717   if (delay_htab == NULL)
718     return NULL_RTX;
719 
720   pair
721     = (struct delay_pair *)htab_find_with_hash (delay_htab_i2, insn,
722 						htab_hash_pointer (insn));
723   if (!pair || pair->stages > 0)
724     return NULL_RTX;
725   return pair->i1;
726 }
727 
728 /* For a delay_pair P, return the number of cycles by which the second
729    insn must follow the first.  */
730 static int
731 pair_delay (struct delay_pair *p)
732 {
733   if (p->stages == 0)
734     return p->cycles;
735   else
736     return p->stages * modulo_ii;
737 }
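/* For example, a pair recorded with STAGES == 2 while modulo_ii == 4 must
   be separated by 8 cycles, whereas with STAGES == 0 the recorded CYCLES
   value is used directly.  */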
738 
739 /* Given an insn INSN, add a dependence on its delayed shadow if it
740    has one.  Also try to find situations where shadows depend on each other
741    and add dependencies to the real insns to limit the amount of backtracking
742    needed.  */
743 void
744 add_delay_dependencies (rtx insn)
745 {
746   struct delay_pair *pair;
747   sd_iterator_def sd_it;
748   dep_t dep;
749 
750   if (!delay_htab)
751     return;
752 
753   pair
754     = (struct delay_pair *)htab_find_with_hash (delay_htab_i2, insn,
755 						htab_hash_pointer (insn));
756   if (!pair)
757     return;
758   add_dependence (insn, pair->i1, REG_DEP_ANTI);
759   if (pair->stages)
760     return;
761 
762   FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep)
763     {
764       rtx pro = DEP_PRO (dep);
765       struct delay_pair *other_pair
766 	= (struct delay_pair *)htab_find_with_hash (delay_htab_i2, pro,
767 						    htab_hash_pointer (pro));
768       if (!other_pair || other_pair->stages)
769 	continue;
770       if (pair_delay (other_pair) >= pair_delay (pair))
771 	{
772 	  if (sched_verbose >= 4)
773 	    {
774 	      fprintf (sched_dump, ";;\tadding dependence %d <- %d\n",
775 		       INSN_UID (other_pair->i1),
776 		       INSN_UID (pair->i1));
777 	      fprintf (sched_dump, ";;\tpair1 %d <- %d, cost %d\n",
778 		       INSN_UID (pair->i1),
779 		       INSN_UID (pair->i2),
780 		       pair_delay (pair));
781 	      fprintf (sched_dump, ";;\tpair2 %d <- %d, cost %d\n",
782 		       INSN_UID (other_pair->i1),
783 		       INSN_UID (other_pair->i2),
784 		       pair_delay (other_pair));
785 	    }
786 	  add_dependence (pair->i1, other_pair->i1, REG_DEP_ANTI);
787 	}
788     }
789 }
790 
791 /* Forward declarations.  */
792 
793 static int priority (rtx);
794 static int rank_for_schedule (const void *, const void *);
795 static void swap_sort (rtx *, int);
796 static void queue_insn (rtx, int, const char *);
797 static int schedule_insn (rtx);
798 static void adjust_priority (rtx);
799 static void advance_one_cycle (void);
800 static void extend_h_i_d (void);
801 
802 
803 /* Notes handling mechanism:
804    =========================
805    Generally, NOTES are saved before scheduling and restored after scheduling.
806    The scheduler distinguishes between two types of notes:
807 
808    (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
809    Before scheduling a region, a pointer to the note is added to the insn
810    that follows or precedes it.  (This happens as part of the data dependence
811    computation).  After scheduling an insn, the pointer contained in it is
812    used for regenerating the corresponding note (in reemit_notes).
813 
814    (2) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
815    these notes are put in a list (in rm_other_notes() and
816    unlink_other_notes ()).  After scheduling the block, these notes are
817    inserted at the beginning of the block (in schedule_block()).  */
818 
819 static void ready_add (struct ready_list *, rtx, bool);
820 static rtx ready_remove_first (struct ready_list *);
821 static rtx ready_remove_first_dispatch (struct ready_list *ready);
822 
823 static void queue_to_ready (struct ready_list *);
824 static int early_queue_to_ready (state_t, struct ready_list *);
825 
826 static void debug_ready_list (struct ready_list *);
827 
828 /* The following functions are used to implement multi-pass scheduling
829    on the first cycle.  */
830 static rtx ready_remove (struct ready_list *, int);
831 static void ready_remove_insn (rtx);
832 
833 static void fix_inter_tick (rtx, rtx);
834 static int fix_tick_ready (rtx);
835 static void change_queue_index (rtx, int);
836 
837 /* The following functions are used to implement scheduling of data/control
838    speculative instructions.  */
839 
840 static void extend_h_i_d (void);
841 static void init_h_i_d (rtx);
842 static int haifa_speculate_insn (rtx, ds_t, rtx *);
843 static void generate_recovery_code (rtx);
844 static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
845 static void begin_speculative_block (rtx);
846 static void add_to_speculative_block (rtx);
847 static void init_before_recovery (basic_block *);
848 static void create_check_block_twin (rtx, bool);
849 static void fix_recovery_deps (basic_block);
850 static bool haifa_change_pattern (rtx, rtx);
851 static void dump_new_block_header (int, basic_block, rtx, rtx);
852 static void restore_bb_notes (basic_block);
853 static void fix_jump_move (rtx);
854 static void move_block_after_check (rtx);
855 static void move_succs (VEC(edge,gc) **, basic_block);
856 static void sched_remove_insn (rtx);
857 static void clear_priorities (rtx, rtx_vec_t *);
858 static void calc_priorities (rtx_vec_t);
859 static void add_jump_dependencies (rtx, rtx);
860 
861 #endif /* INSN_SCHEDULING */
862 
863 /* Point to state used for the current scheduling pass.  */
864 struct haifa_sched_info *current_sched_info;
865 
866 #ifndef INSN_SCHEDULING
867 void
868 schedule_insns (void)
869 {
870 }
871 #else
872 
873 /* Do register pressure sensitive insn scheduling if the flag is
874    set.  */
875 bool sched_pressure_p;
876 
877 /* Map regno -> its pressure class.  The map is defined only when
878    SCHED_PRESSURE_P is true.  */
879 enum reg_class *sched_regno_pressure_class;
880 
881 /* The current register pressure.  Only the elements corresponding to
882    pressure classes are defined.  */
883 static int curr_reg_pressure[N_REG_CLASSES];
884 
885 /* Saved value of the previous array.  */
886 static int saved_reg_pressure[N_REG_CLASSES];
887 
888 /* Registers live at the given scheduling point.  */
889 static bitmap curr_reg_live;
890 
891 /* Saved value of the previous array.  */
892 static bitmap saved_reg_live;
893 
894 /* Registers mentioned in the current region.  */
895 static bitmap region_ref_regs;
896 
897 /* Initiate register pressure relative info for scheduling the current
898    region.  Currently it only clears the set of registers mentioned in
899    the current region.  */
900 void
901 sched_init_region_reg_pressure_info (void)
902 {
903   bitmap_clear (region_ref_regs);
904 }
905 
906 /* Update current register pressure related info after birth (if
907    BIRTH_P) or death of register REGNO.  */
908 static void
909 mark_regno_birth_or_death (int regno, bool birth_p)
910 {
911   enum reg_class pressure_class;
912 
913   pressure_class = sched_regno_pressure_class[regno];
914   if (regno >= FIRST_PSEUDO_REGISTER)
915     {
916       if (pressure_class != NO_REGS)
917 	{
918 	  if (birth_p)
919 	    {
920 	      bitmap_set_bit (curr_reg_live, regno);
921 	      curr_reg_pressure[pressure_class]
922 		+= (ira_reg_class_max_nregs
923 		    [pressure_class][PSEUDO_REGNO_MODE (regno)]);
924 	    }
925 	  else
926 	    {
927 	      bitmap_clear_bit (curr_reg_live, regno);
928 	      curr_reg_pressure[pressure_class]
929 		-= (ira_reg_class_max_nregs
930 		    [pressure_class][PSEUDO_REGNO_MODE (regno)]);
931 	    }
932 	}
933     }
934   else if (pressure_class != NO_REGS
935 	   && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
936     {
937       if (birth_p)
938 	{
939 	  bitmap_set_bit (curr_reg_live, regno);
940 	  curr_reg_pressure[pressure_class]++;
941 	}
942       else
943 	{
944 	  bitmap_clear_bit (curr_reg_live, regno);
945 	  curr_reg_pressure[pressure_class]--;
946 	}
947     }
948 }
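/* For example, the birth of a pseudo whose mode occupies two hard
   registers of its pressure class raises curr_reg_pressure for that class
   by 2 (via ira_reg_class_max_nregs), whereas an allocatable hard register
   always counts as exactly 1.  */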
949 
950 /* Initiate current register pressure related info from living
951    registers given by LIVE.  */
952 static void
953 initiate_reg_pressure_info (bitmap live)
954 {
955   int i;
956   unsigned int j;
957   bitmap_iterator bi;
958 
959   for (i = 0; i < ira_pressure_classes_num; i++)
960     curr_reg_pressure[ira_pressure_classes[i]] = 0;
961   bitmap_clear (curr_reg_live);
962   EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
963     if (current_nr_blocks == 1 || bitmap_bit_p (region_ref_regs, j))
964       mark_regno_birth_or_death (j, true);
965 }
966 
967 /* Mark registers in X as mentioned in the current region.  */
968 static void
969 setup_ref_regs (rtx x)
970 {
971   int i, j, regno;
972   const RTX_CODE code = GET_CODE (x);
973   const char *fmt;
974 
975   if (REG_P (x))
976     {
977       regno = REGNO (x);
978       if (HARD_REGISTER_NUM_P (regno))
979 	bitmap_set_range (region_ref_regs, regno,
980 			  hard_regno_nregs[regno][GET_MODE (x)]);
981       else
982 	bitmap_set_bit (region_ref_regs, REGNO (x));
983       return;
984     }
985   fmt = GET_RTX_FORMAT (code);
986   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
987     if (fmt[i] == 'e')
988       setup_ref_regs (XEXP (x, i));
989     else if (fmt[i] == 'E')
990       {
991 	for (j = 0; j < XVECLEN (x, i); j++)
992 	  setup_ref_regs (XVECEXP (x, i, j));
993       }
994 }
995 
996 /* Initiate current register pressure related info at the start of
997    basic block BB.  */
998 static void
999 initiate_bb_reg_pressure_info (basic_block bb)
1000 {
1001   unsigned int i ATTRIBUTE_UNUSED;
1002   rtx insn;
1003 
1004   if (current_nr_blocks > 1)
1005     FOR_BB_INSNS (bb, insn)
1006       if (NONDEBUG_INSN_P (insn))
1007 	setup_ref_regs (PATTERN (insn));
1008   initiate_reg_pressure_info (df_get_live_in (bb));
1009 #ifdef EH_RETURN_DATA_REGNO
1010   if (bb_has_eh_pred (bb))
1011     for (i = 0; ; ++i)
1012       {
1013 	unsigned int regno = EH_RETURN_DATA_REGNO (i);
1014 
1015 	if (regno == INVALID_REGNUM)
1016 	  break;
1017 	if (! bitmap_bit_p (df_get_live_in (bb), regno))
1018 	  mark_regno_birth_or_death (regno, true);
1019       }
1020 #endif
1021 }
1022 
1023 /* Save current register pressure related info.  */
1024 static void
1025 save_reg_pressure (void)
1026 {
1027   int i;
1028 
1029   for (i = 0; i < ira_pressure_classes_num; i++)
1030     saved_reg_pressure[ira_pressure_classes[i]]
1031       = curr_reg_pressure[ira_pressure_classes[i]];
1032   bitmap_copy (saved_reg_live, curr_reg_live);
1033 }
1034 
1035 /* Restore saved register pressure related info.  */
1036 static void
1037 restore_reg_pressure (void)
1038 {
1039   int i;
1040 
1041   for (i = 0; i < ira_pressure_classes_num; i++)
1042     curr_reg_pressure[ira_pressure_classes[i]]
1043       = saved_reg_pressure[ira_pressure_classes[i]];
1044   bitmap_copy (curr_reg_live, saved_reg_live);
1045 }
1046 
1047 /* Return TRUE if the register used by USE dies after this use, i.e. all its other nondebug uses are in already-scheduled insns.  */
1048 static bool
1049 dying_use_p (struct reg_use_data *use)
1050 {
1051   struct reg_use_data *next;
1052 
1053   for (next = use->next_regno_use; next != use; next = next->next_regno_use)
1054     if (NONDEBUG_INSN_P (next->insn)
1055 	&& QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
1056       return false;
1057   return true;
1058 }
1059 
1060 /* Print info about the current register pressure and its excess for
1061    each pressure class.  */
1062 static void
1063 print_curr_reg_pressure (void)
1064 {
1065   int i;
1066   enum reg_class cl;
1067 
1068   fprintf (sched_dump, ";;\t");
1069   for (i = 0; i < ira_pressure_classes_num; i++)
1070     {
1071       cl = ira_pressure_classes[i];
1072       gcc_assert (curr_reg_pressure[cl] >= 0);
1073       fprintf (sched_dump, "  %s:%d(%d)", reg_class_names[cl],
1074 	       curr_reg_pressure[cl],
1075 	       curr_reg_pressure[cl] - ira_available_class_regs[cl]);
1076     }
1077   fprintf (sched_dump, "\n");
1078 }
1079 
1080 /* Determine if INSN has a condition that is clobbered if a register
1081    in SET_REGS is modified.  */
1082 static bool
1083 cond_clobbered_p (rtx insn, HARD_REG_SET set_regs)
1084 {
1085   rtx pat = PATTERN (insn);
1086   gcc_assert (GET_CODE (pat) == COND_EXEC);
1087   if (TEST_HARD_REG_BIT (set_regs, REGNO (XEXP (COND_EXEC_TEST (pat), 0))))
1088     {
1089       sd_iterator_def sd_it;
1090       dep_t dep;
1091       haifa_change_pattern (insn, ORIG_PAT (insn));
1092       FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1093 	DEP_STATUS (dep) &= ~DEP_CANCELLED;
1094       TODO_SPEC (insn) = HARD_DEP;
1095       if (sched_verbose >= 2)
1096 	fprintf (sched_dump,
1097 		 ";;\t\tdequeue insn %s because of clobbered condition\n",
1098 		 (*current_sched_info->print_insn) (insn, 0));
1099       return true;
1100     }
1101 
1102   return false;
1103 }
1104 
1105 /* Look at the remaining dependencies for insn NEXT, and compute and return
1106    the TODO_SPEC value we should use for it.  This is called after one of
1107    NEXT's dependencies has been resolved.  */
1108 
1109 static ds_t
1110 recompute_todo_spec (rtx next)
1111 {
1112   ds_t new_ds;
1113   sd_iterator_def sd_it;
1114   dep_t dep, control_dep = NULL;
1115   int n_spec = 0;
1116   int n_control = 0;
1117   bool first_p = true;
1118 
1119   if (sd_lists_empty_p (next, SD_LIST_BACK))
1120     /* NEXT has all its dependencies resolved.  */
1121     return 0;
1122 
1123   if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
1124     return HARD_DEP;
1125 
1126   /* Now we've got NEXT with speculative deps only.
1127      1. Look at the deps to see what we have to do.
1128      2. Check if we can do 'todo'.  */
1129   new_ds = 0;
1130 
1131   FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
1132     {
1133       ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
1134 
1135       if (DEBUG_INSN_P (DEP_PRO (dep)) && !DEBUG_INSN_P (next))
1136 	continue;
1137 
1138       if (ds)
1139 	{
1140 	  n_spec++;
1141 	  if (first_p)
1142 	    {
1143 	      first_p = false;
1144 
1145 	      new_ds = ds;
1146 	    }
1147 	  else
1148 	    new_ds = ds_merge (new_ds, ds);
1149 	}
1150       if (DEP_TYPE (dep) == REG_DEP_CONTROL)
1151 	{
1152 	  n_control++;
1153 	  control_dep = dep;
1154 	  DEP_STATUS (dep) &= ~DEP_CANCELLED;
1155 	}
1156     }
1157 
1158   if (n_control == 1 && n_spec == 0)
1159     {
1160       rtx pro, other, new_pat;
1161       rtx cond = NULL_RTX;
1162       bool success;
1163       rtx prev = NULL_RTX;
1164       int i;
1165       unsigned regno;
1166 
1167       if ((current_sched_info->flags & DO_PREDICATION) == 0
1168 	  || (ORIG_PAT (next) != NULL_RTX
1169 	      && PREDICATED_PAT (next) == NULL_RTX))
1170 	return HARD_DEP;
1171 
1172       pro = DEP_PRO (control_dep);
1173       other = real_insn_for_shadow (pro);
1174       if (other != NULL_RTX)
1175 	pro = other;
1176 
1177       cond = sched_get_reverse_condition_uncached (pro);
1178       regno = REGNO (XEXP (cond, 0));
1179 
1180       /* Find the last scheduled insn that modifies the condition register.
1181 	 We can stop looking once we find the insn we depend on through the
1182 	 REG_DEP_CONTROL; if the condition register isn't modified after it,
1183 	 we know that it still has the right value.  */
1184       if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
1185 	FOR_EACH_VEC_ELT_REVERSE (rtx, scheduled_insns, i, prev)
1186 	  {
1187 	    HARD_REG_SET t;
1188 
1189 	    find_all_hard_reg_sets (prev, &t);
1190 	    if (TEST_HARD_REG_BIT (t, regno))
1191 	      return HARD_DEP;
1192 	    if (prev == pro)
1193 	      break;
1194 	  }
1195       if (ORIG_PAT (next) == NULL_RTX)
1196 	{
1197 	  ORIG_PAT (next) = PATTERN (next);
1198 
1199 	  new_pat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (next));
1200 	  success = haifa_change_pattern (next, new_pat);
1201 	  if (!success)
1202 	    return HARD_DEP;
1203 	  PREDICATED_PAT (next) = new_pat;
1204 	}
1205       else if (PATTERN (next) != PREDICATED_PAT (next))
1206 	{
1207 	  bool success = haifa_change_pattern (next,
1208 					       PREDICATED_PAT (next));
1209 	  gcc_assert (success);
1210 	}
1211       DEP_STATUS (control_dep) |= DEP_CANCELLED;
1212       return DEP_CONTROL;
1213     }
1214 
1215   if (PREDICATED_PAT (next) != NULL_RTX)
1216     {
1217       int tick = INSN_TICK (next);
1218       bool success = haifa_change_pattern (next,
1219 					   ORIG_PAT (next));
1220       INSN_TICK (next) = tick;
1221       gcc_assert (success);
1222     }
1223 
1224   /* We can't handle the case where there are both speculative and control
1225      dependencies, so we return HARD_DEP in such a case.  Also fail if the
1226      weakness of the speculative dependencies is below the cutoff, or if
1227      there is more than one control dependency.  */
1228   if ((n_spec > 0 && n_control > 0)
1229       || (n_spec > 0
1230 	  /* Too few points?  */
1231 	  && ds_weak (new_ds) < spec_info->data_weakness_cutoff)
1232       || (n_control > 1))
1233     return HARD_DEP;
1234 
1235   return new_ds;
1236 }
1237 
1238 /* Pointer to the last instruction scheduled.  */
1239 static rtx last_scheduled_insn;
1240 
1241 /* Pointer to the last nondebug instruction scheduled within the
1242    block, or the prev_head of the scheduling block.  Used by
1243    rank_for_schedule, so that insns independent of the last scheduled
1244    insn will be preferred over dependent instructions.  */
1245 static rtx last_nondebug_scheduled_insn;
1246 
1247 /* Pointer that iterates through the list of unscheduled insns if we
1248    have a dbg_cnt enabled.  It always points at an insn prior to the
1249    first unscheduled one.  */
1250 static rtx nonscheduled_insns_begin;
1251 
1252 /* Cached cost of the instruction.  Use insn_cost below to get the cost
1253    of an insn.  -1 here means that the field is not initialized.  */
1254 #define INSN_COST(INSN)	(HID (INSN)->cost)
1255 
1256 /* Compute cost of executing INSN.
1257    This is the number of cycles between instruction issue and
1258    instruction results.  */
1259 int
1260 insn_cost (rtx insn)
1261 {
1262   int cost;
1263 
1264   if (sel_sched_p ())
1265     {
1266       if (recog_memoized (insn) < 0)
1267 	return 0;
1268 
1269       cost = insn_default_latency (insn);
1270       if (cost < 0)
1271 	cost = 0;
1272 
1273       return cost;
1274     }
1275 
1276   cost = INSN_COST (insn);
1277 
1278   if (cost < 0)
1279     {
1280       /* A USE insn, or something else we don't need to
1281 	 understand.  We can't pass these directly to
1282 	 result_ready_cost or insn_default_latency because they will
1283 	 trigger a fatal error for unrecognizable insns.  */
1284       if (recog_memoized (insn) < 0)
1285 	{
1286 	  INSN_COST (insn) = 0;
1287 	  return 0;
1288 	}
1289       else
1290 	{
1291 	  cost = insn_default_latency (insn);
1292 	  if (cost < 0)
1293 	    cost = 0;
1294 
1295 	  INSN_COST (insn) = cost;
1296 	}
1297     }
1298 
1299   return cost;
1300 }
1301 
1302 /* Compute cost of dependence LINK.
1303    This is the number of cycles between instruction issue and
1304    instruction results.
1305    ??? We also use this function to call recog_memoized on all insns.  */
1306 int
1307 dep_cost_1 (dep_t link, dw_t dw)
1308 {
1309   rtx insn = DEP_PRO (link);
1310   rtx used = DEP_CON (link);
1311   int cost;
1312 
1313   if (DEP_COST (link) != UNKNOWN_DEP_COST)
1314     return DEP_COST (link);
1315 
1316   if (delay_htab)
1317     {
1318       struct delay_pair *delay_entry;
1319       delay_entry
1320 	= (struct delay_pair *)htab_find_with_hash (delay_htab_i2, used,
1321 						    htab_hash_pointer (used));
1322       if (delay_entry)
1323 	{
1324 	  if (delay_entry->i1 == insn)
1325 	    {
1326 	      DEP_COST (link) = pair_delay (delay_entry);
1327 	      return DEP_COST (link);
1328 	    }
1329 	}
1330     }
1331 
1332   /* A USE insn should never require the value used to be computed.
1333      This allows the computation of a function's result and parameter
1334      values to overlap the return and call.  We don't care about the
1335      dependence cost when only decreasing register pressure.  */
1336   if (recog_memoized (used) < 0)
1337     {
1338       cost = 0;
1339       recog_memoized (insn);
1340     }
1341   else
1342     {
1343       enum reg_note dep_type = DEP_TYPE (link);
1344 
1345       cost = insn_cost (insn);
1346 
1347       if (INSN_CODE (insn) >= 0)
1348 	{
1349 	  if (dep_type == REG_DEP_ANTI)
1350 	    cost = 0;
1351 	  else if (dep_type == REG_DEP_OUTPUT)
1352 	    {
1353 	      cost = (insn_default_latency (insn)
1354 		      - insn_default_latency (used));
1355 	      if (cost <= 0)
1356 		cost = 1;
1357 	    }
1358 	  else if (bypass_p (insn))
1359 	    cost = insn_latency (insn, used);
1360 	}
1361 
1362 
1363       if (targetm.sched.adjust_cost_2)
1364 	cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
1365 					    dw);
1366       else if (targetm.sched.adjust_cost != NULL)
1367 	{
1368 	  /* This variable is used for backward compatibility with the
1369 	     targets.  */
1370 	  rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);
1371 
1372 	  /* Make it self-cycled, so that if someone tries to walk over this
1373 	     incomplete list they will be caught in an endless loop.  */
1374 	  XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
1375 
1376 	  /* Targets use only REG_NOTE_KIND of the link.  */
1377 	  PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
1378 
1379 	  cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
1380 					    insn, cost);
1381 
1382 	  free_INSN_LIST_node (dep_cost_rtx_link);
1383 	}
1384 
1385       if (cost < 0)
1386 	cost = 0;
1387     }
1388 
1389   DEP_COST (link) = cost;
1390   return cost;
1391 }
1392 
1393 /* Compute cost of dependence LINK.
1394    This is the number of cycles between instruction issue and
1395    instruction results.  */
1396 int
1397 dep_cost (dep_t link)
1398 {
1399   return dep_cost_1 (link, 0);
1400 }
1401 
1402 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
1403    INSN_PRIORITY explicitly.  */
1404 void
1405 increase_insn_priority (rtx insn, int amount)
1406 {
1407   if (!sel_sched_p ())
1408     {
1409       /* We're dealing with haifa-sched.c INSN_PRIORITY.  */
1410       if (INSN_PRIORITY_KNOWN (insn))
1411 	  INSN_PRIORITY (insn) += amount;
1412     }
1413   else
1414     {
1415       /* In sel-sched.c INSN_PRIORITY is not kept up to date.
1416 	 Use EXPR_PRIORITY instead. */
1417       sel_add_to_insn_priority (insn, amount);
1418     }
1419 }
1420 
1421 /* Return 'true' if DEP should be included in priority calculations.  */
1422 static bool
1423 contributes_to_priority_p (dep_t dep)
1424 {
1425   if (DEBUG_INSN_P (DEP_CON (dep))
1426       || DEBUG_INSN_P (DEP_PRO (dep)))
1427     return false;
1428 
1429   /* Critical path is meaningful in block boundaries only.  */
1430   if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
1431 						    DEP_PRO (dep)))
1432     return false;
1433 
1434   /* If the flag COUNT_SPEC_IN_CRITICAL_PATH is set,
1435      then speculative instructions are less likely to be
1436      scheduled.  That is because the priority of
1437      their producers increases, so the producers are
1438      more likely to be scheduled first, thus
1439      resolving the dependence.  */
1440   if (sched_deps_info->generate_spec_deps
1441       && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
1442       && (DEP_STATUS (dep) & SPECULATIVE))
1443     return false;
1444 
1445   return true;
1446 }
1447 
1448 /* Compute the number of nondebug forward deps of an insn.  */
1449 
1450 static int
1451 dep_list_size (rtx insn)
1452 {
1453   sd_iterator_def sd_it;
1454   dep_t dep;
1455   int dbgcount = 0, nodbgcount = 0;
1456 
1457   if (!MAY_HAVE_DEBUG_INSNS)
1458     return sd_lists_size (insn, SD_LIST_FORW);
1459 
1460   FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
1461     {
1462       if (DEBUG_INSN_P (DEP_CON (dep)))
1463 	dbgcount++;
1464       else if (!DEBUG_INSN_P (DEP_PRO (dep)))
1465 	nodbgcount++;
1466     }
1467 
1468   gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, SD_LIST_FORW));
1469 
1470   return nodbgcount;
1471 }
1472 
1473 /* Compute the priority number for INSN.  */
1474 static int
1475 priority (rtx insn)
1476 {
1477   if (! INSN_P (insn))
1478     return 0;
1479 
1480   /* We should not be interested in priority of an already scheduled insn.  */
1481   gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
1482 
1483   if (!INSN_PRIORITY_KNOWN (insn))
1484     {
1485       int this_priority = -1;
1486 
1487       if (dep_list_size (insn) == 0)
1488 	/* ??? We should set INSN_PRIORITY to insn_cost when an insn has
1489 	   some forward deps but all of them are ignored by
1490 	   contributes_to_priority hook.  At the moment we set priority of
1491 	   such insn to 0.  */
1492 	this_priority = insn_cost (insn);
1493       else
1494 	{
1495 	  rtx prev_first, twin;
1496 	  basic_block rec;
1497 
1498 	  /* For recovery check instructions we calculate priority slightly
1499 	     differently than for normal instructions.  Instead of walking
1500 	     through INSN_FORW_DEPS (check) list, we walk through
1501 	     INSN_FORW_DEPS list of each instruction in the corresponding
1502 	     recovery block.  */
1503 
1504           /* Selective scheduling does not define RECOVERY_BLOCK macro.  */
1505 	  rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
1506 	  if (!rec || rec == EXIT_BLOCK_PTR)
1507 	    {
1508 	      prev_first = PREV_INSN (insn);
1509 	      twin = insn;
1510 	    }
1511 	  else
1512 	    {
1513 	      prev_first = NEXT_INSN (BB_HEAD (rec));
1514 	      twin = PREV_INSN (BB_END (rec));
1515 	    }
1516 
1517 	  do
1518 	    {
1519 	      sd_iterator_def sd_it;
1520 	      dep_t dep;
1521 
1522 	      FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
1523 		{
1524 		  rtx next;
1525 		  int next_priority;
1526 
1527 		  next = DEP_CON (dep);
1528 
1529 		  if (BLOCK_FOR_INSN (next) != rec)
1530 		    {
1531 		      int cost;
1532 
1533 		      if (!contributes_to_priority_p (dep))
1534 			continue;
1535 
1536 		      if (twin == insn)
1537 			cost = dep_cost (dep);
1538 		      else
1539 			{
1540 			  struct _dep _dep1, *dep1 = &_dep1;
1541 
1542 			  init_dep (dep1, insn, next, REG_DEP_ANTI);
1543 
1544 			  cost = dep_cost (dep1);
1545 			}
1546 
1547 		      next_priority = cost + priority (next);
1548 
1549 		      if (next_priority > this_priority)
1550 			this_priority = next_priority;
1551 		    }
1552 		}
1553 
1554 	      twin = PREV_INSN (twin);
1555 	    }
1556 	  while (twin != prev_first);
1557 	}
1558 
1559       if (this_priority < 0)
1560 	{
1561 	  gcc_assert (this_priority == -1);
1562 
1563 	  this_priority = insn_cost (insn);
1564 	}
1565 
1566       INSN_PRIORITY (insn) = this_priority;
1567       INSN_PRIORITY_STATUS (insn) = 1;
1568     }
1569 
1570   return INSN_PRIORITY (insn);
1571 }
1572 
1573 /* Macros and functions for keeping the priority queue sorted, and
1574    dealing with queuing and dequeuing of instructions.  */
1575 
1576 #define SCHED_SORT(READY, N_READY)                                   \
1577 do { if ((N_READY) == 2)				             \
1578        swap_sort (READY, N_READY);			             \
1579      else if ((N_READY) > 2)                                         \
1580          qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }  \
1581 while (0)
1582 
1583 /* Setup info about the current register pressure impact of scheduling
1584    INSN at the current scheduling point.  */
1585 static void
1586 setup_insn_reg_pressure_info (rtx insn)
1587 {
1588   int i, change, before, after, hard_regno;
1589   int excess_cost_change;
1590   enum machine_mode mode;
1591   enum reg_class cl;
1592   struct reg_pressure_data *pressure_info;
1593   int *max_reg_pressure;
1594   struct reg_use_data *use;
1595   static int death[N_REG_CLASSES];
1596 
1597   gcc_checking_assert (!DEBUG_INSN_P (insn));
1598 
1599   excess_cost_change = 0;
1600   for (i = 0; i < ira_pressure_classes_num; i++)
1601     death[ira_pressure_classes[i]] = 0;
1602   for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1603     if (dying_use_p (use))
1604       {
1605 	cl = sched_regno_pressure_class[use->regno];
1606 	if (use->regno < FIRST_PSEUDO_REGISTER)
1607 	  death[cl]++;
1608 	else
1609 	  death[cl]
1610 	    += ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
1611       }
1612   pressure_info = INSN_REG_PRESSURE (insn);
1613   max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
1614   gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
1615   for (i = 0; i < ira_pressure_classes_num; i++)
1616     {
1617       cl = ira_pressure_classes[i];
1618       gcc_assert (curr_reg_pressure[cl] >= 0);
1619       change = (int) pressure_info[i].set_increase - death[cl];
1620       before = MAX (0, max_reg_pressure[i] - ira_available_class_regs[cl]);
1621       after = MAX (0, max_reg_pressure[i] + change
1622 		   - ira_available_class_regs[cl]);
1623       hard_regno = ira_class_hard_regs[cl][0];
1624       gcc_assert (hard_regno >= 0);
1625       mode = reg_raw_mode[hard_regno];
1626       excess_cost_change += ((after - before)
1627 			     * (ira_memory_move_cost[mode][cl][0]
1628 				+ ira_memory_move_cost[mode][cl][1]));
1629     }
1630   INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
1631 }
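
/* A standalone sketch of the excess-cost arithmetic above for a single
   pressure class: the increase in register pressure beyond the number
   of available registers is weighted by the store-plus-load cost of a
   spill in that class.  The numbers in the comment are made up for
   illustration; the block is kept under "#if 0" so it is never
   compiled.  */
#if 0
static int
toy_excess_cost_change (int max_pressure, int change,
			int available_regs, int store_cost, int load_cost)
{
  int before = max_pressure - available_regs;
  int after = max_pressure + change - available_regs;

  if (before < 0)
    before = 0;
  if (after < 0)
    after = 0;

  /* E.g. max_pressure = 10, change = +2, available_regs = 8 and
     store/load costs of 4 each give (4 - 2) * 8 = 16.  */
  return (after - before) * (store_cost + load_cost);
}
#endif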
1632 
1633 /* Returns a positive value if x is preferred; returns a negative value if
1634    y is preferred.  Should never return 0, since that will make the sort
1635    unstable.  */
1636 
1637 static int
1638 rank_for_schedule (const void *x, const void *y)
1639 {
1640   rtx tmp = *(const rtx *) y;
1641   rtx tmp2 = *(const rtx *) x;
1642   int tmp_class, tmp2_class;
1643   int val, priority_val, info_val;
1644 
1645   if (MAY_HAVE_DEBUG_INSNS)
1646     {
1647       /* Schedule debug insns as early as possible.  */
1648       if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
1649 	return -1;
1650       else if (!DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
1651 	return 1;
1652       else if (DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
1653 	return INSN_LUID (tmp) - INSN_LUID (tmp2);
1654     }
1655 
  /* The insn in a schedule group should be issued first.  */
  if (flag_sched_group_heuristic
      && SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
1659     return SCHED_GROUP_P (tmp2) ? 1 : -1;
1660 
  /* Make sure that the priorities of TMP and TMP2 are initialized.  */
1662   gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
1663 
1664   if (sched_pressure_p)
1665     {
1666       int diff;
1667 
1668       /* Prefer insn whose scheduling results in the smallest register
1669 	 pressure excess.  */
1670       if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
1671 		   + (INSN_TICK (tmp) > clock_var
1672 		      ? INSN_TICK (tmp) - clock_var : 0)
1673 		   - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
1674 		   - (INSN_TICK (tmp2) > clock_var
1675 		      ? INSN_TICK (tmp2) - clock_var : 0))) != 0)
1676 	return diff;
1677     }
1678 
1680   if (sched_pressure_p
1681       && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var))
1682     {
1683       if (INSN_TICK (tmp) <= clock_var)
1684 	return -1;
1685       else if (INSN_TICK (tmp2) <= clock_var)
1686 	return 1;
1687       else
1688 	return INSN_TICK (tmp) - INSN_TICK (tmp2);
1689     }
1690 
1691   /* If we are doing backtracking in this schedule, prefer insns that
1692      have forward dependencies with negative cost against an insn that
1693      was already scheduled.  */
1694   if (current_sched_info->flags & DO_BACKTRACKING)
1695     {
1696       priority_val = FEEDS_BACKTRACK_INSN (tmp2) - FEEDS_BACKTRACK_INSN (tmp);
1697       if (priority_val)
1698 	return priority_val;
1699     }
1700 
1701   /* Prefer insn with higher priority.  */
1702   priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
1703 
1704   if (flag_sched_critical_path_heuristic && priority_val)
1705     return priority_val;
1706 
  /* Prefer a speculative insn with greater dependency weakness.  */
1708   if (flag_sched_spec_insn_heuristic && spec_info)
1709     {
1710       ds_t ds1, ds2;
1711       dw_t dw1, dw2;
1712       int dw;
1713 
1714       ds1 = TODO_SPEC (tmp) & SPECULATIVE;
1715       if (ds1)
1716 	dw1 = ds_weak (ds1);
1717       else
1718 	dw1 = NO_DEP_WEAK;
1719 
1720       ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
1721       if (ds2)
1722 	dw2 = ds_weak (ds2);
1723       else
1724 	dw2 = NO_DEP_WEAK;
1725 
1726       dw = dw2 - dw1;
1727       if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
1728 	return dw;
1729     }
1730 
1731   info_val = (*current_sched_info->rank) (tmp, tmp2);
  if (flag_sched_rank_heuristic && info_val)
1733     return info_val;
1734 
1735   /* Compare insns based on their relation to the last scheduled
1736      non-debug insn.  */
1737   if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
1738     {
1739       dep_t dep1;
1740       dep_t dep2;
1741       rtx last = last_nondebug_scheduled_insn;
1742 
      /* Classify the instructions into three classes:
         1) Data dependent on last scheduled insn.
         2) Anti/Output dependent on last scheduled insn.
         3) Independent of last scheduled insn, or has latency of one.
         Choose the insn from the highest numbered class if different.  */
1748       dep1 = sd_find_dep_between (last, tmp, true);
1749 
1750       if (dep1 == NULL || dep_cost (dep1) == 1)
1751 	tmp_class = 3;
1752       else if (/* Data dependence.  */
1753 	       DEP_TYPE (dep1) == REG_DEP_TRUE)
1754 	tmp_class = 1;
1755       else
1756 	tmp_class = 2;
1757 
1758       dep2 = sd_find_dep_between (last, tmp2, true);
1759 
      if (dep2 == NULL || dep_cost (dep2) == 1)
1761 	tmp2_class = 3;
1762       else if (/* Data dependence.  */
1763 	       DEP_TYPE (dep2) == REG_DEP_TRUE)
1764 	tmp2_class = 1;
1765       else
1766 	tmp2_class = 2;
1767 
1768       if ((val = tmp2_class - tmp_class))
1769 	return val;
1770     }
1771 
1772   /* Prefer the insn which has more later insns that depend on it.
1773      This gives the scheduler more freedom when scheduling later
1774      instructions at the expense of added register pressure.  */
1775 
1776   val = (dep_list_size (tmp2) - dep_list_size (tmp));
1777 
1778   if (flag_sched_dep_count_heuristic && val != 0)
1779     return val;
1780 
1781   /* If insns are equally good, sort by INSN_LUID (original insn order),
1782      so that we make the sort stable.  This minimizes instruction movement,
1783      thus minimizing sched's effect on debugging and cross-jumping.  */
1784   return INSN_LUID (tmp) - INSN_LUID (tmp2);
1785 }
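
/* A standalone sketch of the comparator convention used above: each
   heuristic returns early when it can distinguish its operands, and a
   unique-ID tie-break at the end keeps qsort (whose ordering of equal
   elements is unspecified) from making the sort unstable.  The
   toy_insn type is illustrative, not part of GCC; the block is kept
   under "#if 0" so it is never compiled.  */
#if 0
struct toy_insn
{
  int priority;		/* Higher is better.  */
  int luid;		/* Unique; preserves original insn order.  */
};

/* Comparator for qsort over an array of toy_insn pointers, mirroring
   the X/Y swap in rank_for_schedule above.  */
static int
toy_rank (const void *x, const void *y)
{
  const struct toy_insn *a = *(const struct toy_insn *const *) y;
  const struct toy_insn *b = *(const struct toy_insn *const *) x;
  int diff = b->priority - a->priority;

  if (diff)
    return diff;
  /* Never return 0; fall back to the original order.  */
  return a->luid - b->luid;
}
#endif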
1786 
/* Resort the array A in which only the element at index N - 1 may be
   out of order.  */
1788 
1789 HAIFA_INLINE static void
1790 swap_sort (rtx *a, int n)
1791 {
1792   rtx insn = a[n - 1];
1793   int i = n - 2;
1794 
1795   while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
1796     {
1797       a[i + 1] = a[i];
1798       i -= 1;
1799     }
1800   a[i + 1] = insn;
1801 }
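
/* A standalone analogue of swap_sort above: when only A[N - 1] can be
   out of order, a single insertion pass is cheaper than a full qsort.
   Illustrative only; kept under "#if 0" so it is never compiled.  */
#if 0
static void
toy_swap_sort (int *a, int n)
{
  int v = a[n - 1];
  int i = n - 2;

  /* Shift larger elements up until V's slot is found.  */
  while (i >= 0 && a[i] > v)
    {
      a[i + 1] = a[i];
      i--;
    }
  a[i + 1] = v;
}
#endif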
1802 
/* Add INSN to the insn queue so that it can be executed at least
   N_CYCLES after the currently executing insn.  Preserve the insn
   chain for debugging purposes.  REASON will be printed in debugging
   output.  */
1807 
1808 HAIFA_INLINE static void
1809 queue_insn (rtx insn, int n_cycles, const char *reason)
1810 {
1811   int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1812   rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
1813   int new_tick;
1814 
1815   gcc_assert (n_cycles <= max_insn_queue_index);
1816   gcc_assert (!DEBUG_INSN_P (insn));
1817 
1818   insn_queue[next_q] = link;
1819   q_size += 1;
1820 
1821   if (sched_verbose >= 2)
1822     {
1823       fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
1824 	       (*current_sched_info->print_insn) (insn, 0));
1825 
1826       fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
1827     }
1828 
1829   QUEUE_INDEX (insn) = next_q;
1830 
1831   if (current_sched_info->flags & DO_BACKTRACKING)
1832     {
1833       new_tick = clock_var + n_cycles;
1834       if (INSN_TICK (insn) == INVALID_TICK || INSN_TICK (insn) < new_tick)
1835 	INSN_TICK (insn) = new_tick;
1836 
1837       if (INSN_EXACT_TICK (insn) != INVALID_TICK
1838 	  && INSN_EXACT_TICK (insn) < clock_var + n_cycles)
1839 	{
1840 	  must_backtrack = true;
1841 	  if (sched_verbose >= 2)
1842 	    fprintf (sched_dump, ";;\t\tcausing a backtrack.\n");
1843 	}
1844     }
1845 }
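
/* A standalone sketch of the insn queue indexing used above: the queue
   is a circular array with one bucket per future cycle, and the
   NEXT_Q_AFTER-style wrap-around maps "N cycles from now" onto a slot.
   The size and toy_* names are illustrative; the power-of-two size is
   what makes the bitwise AND equivalent to a modulo.  Kept under
   "#if 0" so it is never compiled.  */
#if 0
#define TOY_QUEUE_SIZE 64	/* Power of two, longer than any latency.  */

static void *toy_queue[TOY_QUEUE_SIZE];
static int toy_q_ptr;		/* Bucket for the current cycle.  */

/* The bucket that becomes current after N more cycles.  */
static int
toy_next_q_after (int q, int n)
{
  return (q + n) & (TOY_QUEUE_SIZE - 1);
}

/* Advancing a cycle only moves the pointer; a bucket's contents are
   dumped into the ready list when its cycle comes around.  */
static void
toy_advance_cycle (void)
{
  toy_q_ptr = toy_next_q_after (toy_q_ptr, 1);
}
#endif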
1846 
1847 /* Remove INSN from queue.  */
1848 static void
1849 queue_remove (rtx insn)
1850 {
1851   gcc_assert (QUEUE_INDEX (insn) >= 0);
1852   remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
1853   q_size--;
1854   QUEUE_INDEX (insn) = QUEUE_NOWHERE;
1855 }
1856 
1857 /* Return a pointer to the bottom of the ready list, i.e. the insn
1858    with the lowest priority.  */
1859 
1860 rtx *
1861 ready_lastpos (struct ready_list *ready)
1862 {
1863   gcc_assert (ready->n_ready >= 1);
1864   return ready->vec + ready->first - ready->n_ready + 1;
1865 }
1866 
1867 /* Add an element INSN to the ready list so that it ends up with the
1868    lowest/highest priority depending on FIRST_P.  */
1869 
1870 HAIFA_INLINE static void
1871 ready_add (struct ready_list *ready, rtx insn, bool first_p)
1872 {
1873   if (!first_p)
1874     {
1875       if (ready->first == ready->n_ready)
1876 	{
1877 	  memmove (ready->vec + ready->veclen - ready->n_ready,
1878 		   ready_lastpos (ready),
1879 		   ready->n_ready * sizeof (rtx));
1880 	  ready->first = ready->veclen - 1;
1881 	}
1882       ready->vec[ready->first - ready->n_ready] = insn;
1883     }
1884   else
1885     {
1886       if (ready->first == ready->veclen - 1)
1887 	{
1888 	  if (ready->n_ready)
1889 	    /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
1890 	    memmove (ready->vec + ready->veclen - ready->n_ready - 1,
1891 		     ready_lastpos (ready),
1892 		     ready->n_ready * sizeof (rtx));
1893 	  ready->first = ready->veclen - 2;
1894 	}
1895       ready->vec[++(ready->first)] = insn;
1896     }
1897 
1898   ready->n_ready++;
1899   if (DEBUG_INSN_P (insn))
1900     ready->n_debug++;
1901 
1902   gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
1903   QUEUE_INDEX (insn) = QUEUE_READY;
1904 
1905   if (INSN_EXACT_TICK (insn) != INVALID_TICK
1906       && INSN_EXACT_TICK (insn) < clock_var)
1907     {
1908       must_backtrack = true;
1909     }
1910 }
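
/* A standalone sketch of the ready list layout used above: the N_READY
   entries occupy VEC[FIRST - N_READY + 1 .. FIRST], with the highest
   priority insn at VEC[FIRST].  Adding at the bottom normally just
   writes one slot; only when the block has slid down near index 0 is
   it shifted back to the top of VEC.  The toy_* names are illustrative
   and the block is kept under "#if 0" so it is never compiled.  */
#if 0
#include <string.h>

struct toy_ready
{
  void **vec;
  int veclen;
  int first;			/* Index of the highest priority element.  */
  int n_ready;
};

/* Add INSN with the lowest priority, mirroring ready_add above.  */
static void
toy_ready_add_last (struct toy_ready *r, void *insn)
{
  if (r->first == r->n_ready)
    {
      /* No room below: slide the whole block to the top of VEC.  */
      memmove (r->vec + r->veclen - r->n_ready,
	       r->vec + r->first - r->n_ready + 1,
	       r->n_ready * sizeof (void *));
      r->first = r->veclen - 1;
    }
  r->vec[r->first - r->n_ready] = insn;
  r->n_ready++;
}
#endif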
1911 
1912 /* Remove the element with the highest priority from the ready list and
1913    return it.  */
1914 
1915 HAIFA_INLINE static rtx
1916 ready_remove_first (struct ready_list *ready)
1917 {
1918   rtx t;
1919 
1920   gcc_assert (ready->n_ready);
1921   t = ready->vec[ready->first--];
1922   ready->n_ready--;
1923   if (DEBUG_INSN_P (t))
1924     ready->n_debug--;
  /* If the ready list becomes empty, reset it.  */
1926   if (ready->n_ready == 0)
1927     ready->first = ready->veclen - 1;
1928 
1929   gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
1930   QUEUE_INDEX (t) = QUEUE_NOWHERE;
1931 
1932   return t;
1933 }
1934 
/* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we try to choose the ready insn which
   permits starting the maximum number of insns on the same cycle.  */
1938 
/* Return the element at position INDEX from the ready list.  The insn
   with the highest priority has INDEX 0, and the insn with the lowest
   priority has INDEX N_READY - 1.  */
1942 
1943 rtx
1944 ready_element (struct ready_list *ready, int index)
1945 {
1946   gcc_assert (ready->n_ready && index < ready->n_ready);
1947 
1948   return ready->vec[ready->first - index];
1949 }
1950 
/* Remove the element at position INDEX from the ready list and return
   it.  The insn with the highest priority has INDEX 0, and the insn
   with the lowest priority has INDEX N_READY - 1.  */
1954 
1955 HAIFA_INLINE static rtx
1956 ready_remove (struct ready_list *ready, int index)
1957 {
1958   rtx t;
1959   int i;
1960 
1961   if (index == 0)
1962     return ready_remove_first (ready);
1963   gcc_assert (ready->n_ready && index < ready->n_ready);
1964   t = ready->vec[ready->first - index];
1965   ready->n_ready--;
1966   if (DEBUG_INSN_P (t))
1967     ready->n_debug--;
1968   for (i = index; i < ready->n_ready; i++)
1969     ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
1970   QUEUE_INDEX (t) = QUEUE_NOWHERE;
1971   return t;
1972 }
1973 
1974 /* Remove INSN from the ready list.  */
1975 static void
1976 ready_remove_insn (rtx insn)
1977 {
1978   int i;
1979 
1980   for (i = 0; i < readyp->n_ready; i++)
1981     if (ready_element (readyp, i) == insn)
1982       {
1983         ready_remove (readyp, i);
1984         return;
1985       }
1986   gcc_unreachable ();
1987 }
1988 
1989 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
1990    macro.  */
1991 
1992 void
1993 ready_sort (struct ready_list *ready)
1994 {
1995   int i;
1996   rtx *first = ready_lastpos (ready);
1997 
1998   if (sched_pressure_p)
1999     {
2000       for (i = 0; i < ready->n_ready; i++)
2001 	if (!DEBUG_INSN_P (first[i]))
2002 	  setup_insn_reg_pressure_info (first[i]);
2003     }
2004   SCHED_SORT (first, ready->n_ready);
2005 }
2006 
2007 /* PREV is an insn that is ready to execute.  Adjust its priority if that
2008    will help shorten or lengthen register lifetimes as appropriate.  Also
2009    provide a hook for the target to tweak itself.  */
2010 
2011 HAIFA_INLINE static void
2012 adjust_priority (rtx prev)
2013 {
2014   /* ??? There used to be code here to try and estimate how an insn
2015      affected register lifetimes, but it did it by looking at REG_DEAD
2016      notes, which we removed in schedule_region.  Nor did it try to
2017      take into account register pressure or anything useful like that.
2018 
2019      Revisit when we have a machine model to work with and not before.  */
2020 
2021   if (targetm.sched.adjust_priority)
2022     INSN_PRIORITY (prev) =
2023       targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
2024 }
2025 
/* Advance DFA state STATE by one cycle.  */
2027 void
2028 advance_state (state_t state)
2029 {
2030   if (targetm.sched.dfa_pre_advance_cycle)
2031     targetm.sched.dfa_pre_advance_cycle ();
2032 
2033   if (targetm.sched.dfa_pre_cycle_insn)
2034     state_transition (state,
2035 		      targetm.sched.dfa_pre_cycle_insn ());
2036 
2037   state_transition (state, NULL);
2038 
2039   if (targetm.sched.dfa_post_cycle_insn)
2040     state_transition (state,
2041 		      targetm.sched.dfa_post_cycle_insn ());
2042 
2043   if (targetm.sched.dfa_post_advance_cycle)
2044     targetm.sched.dfa_post_advance_cycle ();
2045 }
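
/* What the NULL transition above accomplishes can be pictured with a
   trivial scoreboard model: issuing an insn reserves a functional unit
   for some cycles, and advancing a cycle ages every reservation.  The
   real automaton generated by genautomata is far more general; this
   sketch is purely illustrative and kept under "#if 0" so it is never
   compiled.  */
#if 0
#define TOY_UNITS 4

struct toy_state
{
  int busy[TOY_UNITS];		/* Remaining busy cycles per unit.  */
};

/* Analogue of state_transition (STATE, NULL): move to the next cycle.  */
static void
toy_advance_state (struct toy_state *s)
{
  int i;

  for (i = 0; i < TOY_UNITS; i++)
    if (s->busy[i] > 0)
      s->busy[i]--;
}
#endif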
2046 
/* Advance time by one cycle.  */
2048 HAIFA_INLINE static void
2049 advance_one_cycle (void)
2050 {
2051   advance_state (curr_state);
2052   if (sched_verbose >= 6)
2053     fprintf (sched_dump, ";;\tAdvanced a state.\n");
2054 }
2055 
2056 /* Update register pressure after scheduling INSN.  */
2057 static void
2058 update_register_pressure (rtx insn)
2059 {
2060   struct reg_use_data *use;
2061   struct reg_set_data *set;
2062 
2063   gcc_checking_assert (!DEBUG_INSN_P (insn));
2064 
2065   for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2066     if (dying_use_p (use) && bitmap_bit_p (curr_reg_live, use->regno))
2067       mark_regno_birth_or_death (use->regno, false);
2068   for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
2069     mark_regno_birth_or_death (set->regno, true);
2070 }
2071 
2072 /* Set up or update (if UPDATE_P) max register pressure (see its
2073    meaning in sched-int.h::_haifa_insn_data) for all current BB insns
2074    after insn AFTER.  */
2075 static void
2076 setup_insn_max_reg_pressure (rtx after, bool update_p)
2077 {
2078   int i, p;
2079   bool eq_p;
2080   rtx insn;
2081   static int max_reg_pressure[N_REG_CLASSES];
2082 
2083   save_reg_pressure ();
2084   for (i = 0; i < ira_pressure_classes_num; i++)
2085     max_reg_pressure[ira_pressure_classes[i]]
2086       = curr_reg_pressure[ira_pressure_classes[i]];
2087   for (insn = NEXT_INSN (after);
2088        insn != NULL_RTX && ! BARRIER_P (insn)
2089 	 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
2090        insn = NEXT_INSN (insn))
2091     if (NONDEBUG_INSN_P (insn))
2092       {
2093 	eq_p = true;
2094 	for (i = 0; i < ira_pressure_classes_num; i++)
2095 	  {
2096 	    p = max_reg_pressure[ira_pressure_classes[i]];
2097 	    if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
2098 	      {
2099 		eq_p = false;
2100 		INSN_MAX_REG_PRESSURE (insn)[i]
2101 		  = max_reg_pressure[ira_pressure_classes[i]];
2102 	      }
2103 	  }
2104 	if (update_p && eq_p)
2105 	  break;
2106 	update_register_pressure (insn);
2107 	for (i = 0; i < ira_pressure_classes_num; i++)
2108 	  if (max_reg_pressure[ira_pressure_classes[i]]
2109 	      < curr_reg_pressure[ira_pressure_classes[i]])
2110 	    max_reg_pressure[ira_pressure_classes[i]]
2111 	      = curr_reg_pressure[ira_pressure_classes[i]];
2112       }
2113   restore_reg_pressure ();
2114 }
2115 
/* Update the current register pressure after scheduling INSN.  Also
   update the max register pressure for unscheduled insns of the
   current BB.  */
2119 static void
2120 update_reg_and_insn_max_reg_pressure (rtx insn)
2121 {
2122   int i;
2123   int before[N_REG_CLASSES];
2124 
2125   for (i = 0; i < ira_pressure_classes_num; i++)
2126     before[i] = curr_reg_pressure[ira_pressure_classes[i]];
2127   update_register_pressure (insn);
2128   for (i = 0; i < ira_pressure_classes_num; i++)
2129     if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
2130       break;
2131   if (i < ira_pressure_classes_num)
2132     setup_insn_max_reg_pressure (insn, true);
2133 }
2134 
/* Set up register pressure at the beginning of basic block BB, whose
   insns start after insn AFTER.  Also set up max register pressure
   for all insns of the basic block.  */
2138 void
2139 sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
2140 {
2141   gcc_assert (sched_pressure_p);
2142   initiate_bb_reg_pressure_info (bb);
2143   setup_insn_max_reg_pressure (after, false);
2144 }
2145 
2146 /* If doing predication while scheduling, verify whether INSN, which
2147    has just been scheduled, clobbers the conditions of any
2148    instructions that must be predicated in order to break their
2149    dependencies.  If so, remove them from the queues so that they will
2150    only be scheduled once their control dependency is resolved.  */
2151 
2152 static void
2153 check_clobbered_conditions (rtx insn)
2154 {
2155   HARD_REG_SET t;
2156   int i;
2157 
2158   if ((current_sched_info->flags & DO_PREDICATION) == 0)
2159     return;
2160 
2161   find_all_hard_reg_sets (insn, &t);
2162 
2163  restart:
2164   for (i = 0; i < ready.n_ready; i++)
2165     {
2166       rtx x = ready_element (&ready, i);
2167       if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
2168 	{
2169 	  ready_remove_insn (x);
2170 	  goto restart;
2171 	}
2172     }
2173   for (i = 0; i <= max_insn_queue_index; i++)
2174     {
2175       rtx link;
2176       int q = NEXT_Q_AFTER (q_ptr, i);
2177 
2178     restart_queue:
2179       for (link = insn_queue[q]; link; link = XEXP (link, 1))
2180 	{
2181 	  rtx x = XEXP (link, 0);
2182 	  if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
2183 	    {
2184 	      queue_remove (x);
2185 	      goto restart_queue;
2186 	    }
2187 	}
2188     }
2189 }
2190 
2191 /* A structure that holds local state for the loop in schedule_block.  */
2192 struct sched_block_state
2193 {
2194   /* True if no real insns have been scheduled in the current cycle.  */
2195   bool first_cycle_insn_p;
2196   /* True if a shadow insn has been scheduled in the current cycle, which
2197      means that no more normal insns can be issued.  */
2198   bool shadows_only_p;
2199   /* True if we're winding down a modulo schedule, which means that we only
2200      issue insns with INSN_EXACT_TICK set.  */
2201   bool modulo_epilogue;
2202   /* Initialized with the machine's issue rate every cycle, and updated
2203      by calls to the variable_issue hook.  */
2204   int can_issue_more;
2205 };
2206 
/* INSN is the "currently executing insn".  Launch each insn which was
   waiting on INSN.  The function returns the necessary cycle advance
   after issuing the insn (it is not zero for insns in a schedule
   group).  */
2212 
2213 static int
2214 schedule_insn (rtx insn)
2215 {
2216   sd_iterator_def sd_it;
2217   dep_t dep;
2218   int i;
2219   int advance = 0;
2220 
2221   if (sched_verbose >= 1)
2222     {
2223       struct reg_pressure_data *pressure_info;
2224       char buf[2048];
2225 
2226       print_insn (buf, insn, 0);
2227       buf[40] = 0;
2228       fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);
2229 
2230       if (recog_memoized (insn) < 0)
2231 	fprintf (sched_dump, "nothing");
2232       else
2233 	print_reservation (sched_dump, insn);
2234       pressure_info = INSN_REG_PRESSURE (insn);
2235       if (pressure_info != NULL)
2236 	{
2237 	  fputc (':', sched_dump);
2238 	  for (i = 0; i < ira_pressure_classes_num; i++)
2239 	    fprintf (sched_dump, "%s%+d(%d)",
2240 		     reg_class_names[ira_pressure_classes[i]],
2241 		     pressure_info[i].set_increase, pressure_info[i].change);
2242 	}
2243       fputc ('\n', sched_dump);
2244     }
2245 
2246   if (sched_pressure_p && !DEBUG_INSN_P (insn))
2247     update_reg_and_insn_max_reg_pressure (insn);
2248 
  /* An insn being scheduled should have all its hard dependencies
     resolved and should have been removed from the ready list.  */
2251   gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
2252 
2253   /* Reset debug insns invalidated by moving this insn.  */
2254   if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
2255     for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
2256 	 sd_iterator_cond (&sd_it, &dep);)
2257       {
2258 	rtx dbg = DEP_PRO (dep);
2259 	struct reg_use_data *use, *next;
2260 
2261 	if (DEP_STATUS (dep) & DEP_CANCELLED)
2262 	  {
2263 	    sd_iterator_next (&sd_it);
2264 	    continue;
2265 	  }
2266 
2267 	gcc_assert (DEBUG_INSN_P (dbg));
2268 
2269 	if (sched_verbose >= 6)
2270 	  fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
2271 		   INSN_UID (dbg));
2272 
2273 	/* ??? Rather than resetting the debug insn, we might be able
2274 	   to emit a debug temp before the just-scheduled insn, but
2275 	   this would involve checking that the expression at the
2276 	   point of the debug insn is equivalent to the expression
2277 	   before the just-scheduled insn.  They might not be: the
2278 	   expression in the debug insn may depend on other insns not
2279 	   yet scheduled that set MEMs, REGs or even other debug
2280 	   insns.  It's not clear that attempting to preserve debug
2281 	   information in these cases is worth the effort, given how
2282 	   uncommon these resets are and the likelihood that the debug
2283 	   temps introduced won't survive the schedule change.  */
2284 	INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
2285 	df_insn_rescan (dbg);
2286 
2287 	/* Unknown location doesn't use any registers.  */
2288 	for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
2289 	  {
2290 	    struct reg_use_data *prev = use;
2291 
2292 	    /* Remove use from the cyclic next_regno_use chain first.  */
2293 	    while (prev->next_regno_use != use)
2294 	      prev = prev->next_regno_use;
2295 	    prev->next_regno_use = use->next_regno_use;
2296 	    next = use->next_insn_use;
2297 	    free (use);
2298 	  }
2299 	INSN_REG_USE_LIST (dbg) = NULL;
2300 
2301 	/* We delete rather than resolve these deps, otherwise we
2302 	   crash in sched_free_deps(), because forward deps are
2303 	   expected to be released before backward deps.  */
2304 	sd_delete_dep (sd_it);
2305       }
2306 
2307   gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
2308   QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
2309 
2310   gcc_assert (INSN_TICK (insn) >= MIN_TICK);
2311   if (INSN_TICK (insn) > clock_var)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if the following flag is set.  */
2314     gcc_assert (flag_sched_stalled_insns);
2315 
2316   /* ??? Probably, if INSN is scheduled prematurely, we should leave
2317      INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
2318   INSN_TICK (insn) = clock_var;
2319 
2320   check_clobbered_conditions (insn);
2321 
2322   /* Update dependent instructions.  */
2323   for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
2324        sd_iterator_cond (&sd_it, &dep);)
2325     {
2326       rtx next = DEP_CON (dep);
2327       bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
2328 
      /* Resolve the dependence between INSN and NEXT.
	 sd_resolve_dep () moves the current dep to another list,
	 thus advancing the iterator.  */
2332       sd_resolve_dep (sd_it);
2333 
2334       if (cancelled)
2335 	{
2336 	  if (QUEUE_INDEX (next) != QUEUE_SCHEDULED)
2337 	    {
2338 	      int tick = INSN_TICK (next);
2339 	      gcc_assert (ORIG_PAT (next) != NULL_RTX);
2340 	      haifa_change_pattern (next, ORIG_PAT (next));
2341 	      INSN_TICK (next) = tick;
2342 	      if (sd_lists_empty_p (next, SD_LIST_BACK))
2343 		TODO_SPEC (next) = 0;
2344 	      else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
2345 		TODO_SPEC (next) = HARD_DEP;
2346 	    }
2347 	  continue;
2348 	}
2349 
2350       /* Don't bother trying to mark next as ready if insn is a debug
2351 	 insn.  If insn is the last hard dependency, it will have
2352 	 already been discounted.  */
2353       if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
2354 	continue;
2355 
2356       if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
2357 	{
2358 	  int effective_cost;
2359 
2360 	  effective_cost = try_ready (next);
2361 
2362 	  if (effective_cost >= 0
2363 	      && SCHED_GROUP_P (next)
2364 	      && advance < effective_cost)
2365 	    advance = effective_cost;
2366 	}
2367       else
	/* A check insn always has only one forward dependence (to the first
	   insn in the recovery block); therefore, this will be executed only
	   once.  */
2370 	{
2371 	  gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
2372 	  fix_recovery_deps (RECOVERY_BLOCK (insn));
2373 	}
2374     }
2375 
2376   /* Annotate the instruction with issue information -- TImode
2377      indicates that the instruction is expected not to be able
2378      to issue on the same cycle as the previous insn.  A machine
2379      may use this information to decide how the instruction should
2380      be aligned.  */
2381   if (issue_rate > 1
2382       && GET_CODE (PATTERN (insn)) != USE
2383       && GET_CODE (PATTERN (insn)) != CLOBBER
2384       && !DEBUG_INSN_P (insn))
2385     {
2386       if (reload_completed)
2387 	PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
2388       last_clock_var = clock_var;
2389     }
2390 
2391   return advance;
2392 }
2393 
2394 /* Functions for handling of notes.  */
2395 
/* Add the note list that ends at FROM_END to the end of the list
   pointed to by TO_ENDP.  */
2397 void
2398 concat_note_lists (rtx from_end, rtx *to_endp)
2399 {
2400   rtx from_start;
2401 
  /* It's easy when we have nothing to concat.  */
2403   if (from_end == NULL)
2404     return;
2405 
  /* It's also easy when the destination is empty.  */
2407   if (*to_endp == NULL)
2408     {
2409       *to_endp = from_end;
2410       return;
2411     }
2412 
2413   from_start = from_end;
2414   while (PREV_INSN (from_start) != NULL)
2415     from_start = PREV_INSN (from_start);
2416 
2417   PREV_INSN (from_start) = *to_endp;
2418   NEXT_INSN (*to_endp) = from_start;
2419   *to_endp = from_end;
2420 }
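
/* A standalone sketch of the splice above on a generic doubly-linked
   list addressed by its tail, mirroring how note lists are chained:
   walk back to the head of the FROM chain, then stitch it onto the TO
   tail.  The toy_note type is illustrative; kept under "#if 0" so it
   is never compiled.  */
#if 0
#include <stddef.h>

struct toy_note
{
  struct toy_note *prev, *next;
};

static void
toy_concat (struct toy_note *from_end, struct toy_note **to_endp)
{
  struct toy_note *from_start = from_end;

  if (from_end == NULL)
    return;
  if (*to_endp == NULL)
    {
      *to_endp = from_end;
      return;
    }

  while (from_start->prev != NULL)
    from_start = from_start->prev;

  from_start->prev = *to_endp;
  (*to_endp)->next = from_start;
  *to_endp = from_end;
}
#endif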
2421 
2422 /* Delete notes between HEAD and TAIL and put them in the chain
2423    of notes ended by NOTE_LIST.  */
2424 void
2425 remove_notes (rtx head, rtx tail)
2426 {
2427   rtx next_tail, insn, next;
2428 
2429   note_list = 0;
2430   if (head == tail && !INSN_P (head))
2431     return;
2432 
2433   next_tail = NEXT_INSN (tail);
2434   for (insn = head; insn != next_tail; insn = next)
2435     {
2436       next = NEXT_INSN (insn);
2437       if (!NOTE_P (insn))
2438 	continue;
2439 
2440       switch (NOTE_KIND (insn))
2441 	{
2442 	case NOTE_INSN_BASIC_BLOCK:
2443 	  continue;
2444 
2445 	case NOTE_INSN_EPILOGUE_BEG:
2446 	  if (insn != tail)
2447 	    {
2448 	      remove_insn (insn);
2449 	      add_reg_note (next, REG_SAVE_NOTE,
2450 			    GEN_INT (NOTE_INSN_EPILOGUE_BEG));
2451 	      break;
2452 	    }
2453 	  /* FALLTHRU */
2454 
2455 	default:
2456 	  remove_insn (insn);
2457 
	  /* Add the note to the list that ends at NOTE_LIST.  */
2459 	  PREV_INSN (insn) = note_list;
2460 	  NEXT_INSN (insn) = NULL_RTX;
2461 	  if (note_list)
2462 	    NEXT_INSN (note_list) = insn;
2463 	  note_list = insn;
2464 	  break;
2465 	}
2466 
2467       gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
2468     }
2469 }
2470 
2471 /* A structure to record enough data to allow us to backtrack the scheduler to
2472    a previous state.  */
2473 struct haifa_saved_data
2474 {
2475   /* Next entry on the list.  */
2476   struct haifa_saved_data *next;
2477 
2478   /* Backtracking is associated with scheduling insns that have delay slots.
2479      DELAY_PAIR points to the structure that contains the insns involved, and
2480      the number of cycles between them.  */
2481   struct delay_pair *delay_pair;
2482 
2483   /* Data used by the frontend (e.g. sched-ebb or sched-rgn).  */
2484   void *fe_saved_data;
2485   /* Data used by the backend.  */
2486   void *be_saved_data;
2487 
2488   /* Copies of global state.  */
2489   int clock_var, last_clock_var;
2490   struct ready_list ready;
2491   state_t curr_state;
2492 
2493   rtx last_scheduled_insn;
2494   rtx last_nondebug_scheduled_insn;
2495   int cycle_issued_insns;
2496 
2497   /* Copies of state used in the inner loop of schedule_block.  */
2498   struct sched_block_state sched_block;
2499 
2500   /* We don't need to save q_ptr, as its value is arbitrary and we can set it
2501      to 0 when restoring.  */
2502   int q_size;
2503   rtx *insn_queue;
2504 };
2505 
2506 /* A record, in reverse order, of all scheduled insns which have delay slots
2507    and may require backtracking.  */
2508 static struct haifa_saved_data *backtrack_queue;
2509 
2510 /* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
2511    to SET_P.  */
2512 static void
2513 mark_backtrack_feeds (rtx insn, int set_p)
2514 {
2515   sd_iterator_def sd_it;
2516   dep_t dep;
2517   FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
2518     {
2519       FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
2520     }
2521 }
2522 
2523 /* Save the current scheduler state so that we can backtrack to it
2524    later if necessary.  PAIR gives the insns that make it necessary to
2525    save this point.  SCHED_BLOCK is the local state of schedule_block
   that needs to be saved.  */
2527 static void
2528 save_backtrack_point (struct delay_pair *pair,
2529 		      struct sched_block_state sched_block)
2530 {
2531   int i;
2532   struct haifa_saved_data *save = XNEW (struct haifa_saved_data);
2533 
2534   save->curr_state = xmalloc (dfa_state_size);
2535   memcpy (save->curr_state, curr_state, dfa_state_size);
2536 
2537   save->ready.first = ready.first;
2538   save->ready.n_ready = ready.n_ready;
2539   save->ready.n_debug = ready.n_debug;
2540   save->ready.veclen = ready.veclen;
2541   save->ready.vec = XNEWVEC (rtx, ready.veclen);
2542   memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));
2543 
2544   save->insn_queue = XNEWVEC (rtx, max_insn_queue_index + 1);
2545   save->q_size = q_size;
2546   for (i = 0; i <= max_insn_queue_index; i++)
2547     {
2548       int q = NEXT_Q_AFTER (q_ptr, i);
2549       save->insn_queue[i] = copy_INSN_LIST (insn_queue[q]);
2550     }
2551 
2552   save->clock_var = clock_var;
2553   save->last_clock_var = last_clock_var;
2554   save->cycle_issued_insns = cycle_issued_insns;
2555   save->last_scheduled_insn = last_scheduled_insn;
2556   save->last_nondebug_scheduled_insn = last_nondebug_scheduled_insn;
2557 
2558   save->sched_block = sched_block;
2559 
2560   if (current_sched_info->save_state)
2561     save->fe_saved_data = (*current_sched_info->save_state) ();
2562 
2563   if (targetm.sched.alloc_sched_context)
2564     {
2565       save->be_saved_data = targetm.sched.alloc_sched_context ();
2566       targetm.sched.init_sched_context (save->be_saved_data, false);
2567     }
2568   else
2569     save->be_saved_data = NULL;
2570 
2571   save->delay_pair = pair;
2572 
2573   save->next = backtrack_queue;
2574   backtrack_queue = save;
2575 
2576   while (pair)
2577     {
2578       mark_backtrack_feeds (pair->i2, 1);
2579       INSN_TICK (pair->i2) = INVALID_TICK;
2580       INSN_EXACT_TICK (pair->i2) = clock_var + pair_delay (pair);
2581       SHADOW_P (pair->i2) = pair->stages == 0;
2582       pair = pair->next_same_i1;
2583     }
2584 }
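
/* A standalone sketch of the snapshot idea above: everything the
   scheduler mutates is copied into a heap-allocated record pushed onto
   a stack, so that backtracking is a straight restore in LIFO order.
   The toy_* fields are illustrative; the real record is struct
   haifa_saved_data.  Kept under "#if 0" so it is never compiled.  */
#if 0
#include <stdlib.h>
#include <string.h>

struct toy_snapshot
{
  struct toy_snapshot *next;	/* Stack link.  */
  int clock;
  int n_ready;
  void **ready_vec;		/* Deep copy of the ready list.  */
};

static struct toy_snapshot *toy_stack;

static void
toy_save (int clock, void **ready_vec, int n_ready)
{
  struct toy_snapshot *s = (struct toy_snapshot *) malloc (sizeof *s);

  s->clock = clock;
  s->n_ready = n_ready;
  s->ready_vec = (void **) malloc (n_ready * sizeof (void *));
  memcpy (s->ready_vec, ready_vec, n_ready * sizeof (void *));
  s->next = toy_stack;
  toy_stack = s;
}
#endif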
2585 
2586 /* Walk the ready list and all queues. If any insns have unresolved backwards
2587    dependencies, these must be cancelled deps, broken by predication.  Set or
2588    clear (depending on SET) the DEP_CANCELLED bit in DEP_STATUS.  */
2589 
2590 static void
2591 toggle_cancelled_flags (bool set)
2592 {
2593   int i;
2594   sd_iterator_def sd_it;
2595   dep_t dep;
2596 
2597   if (ready.n_ready > 0)
2598     {
2599       rtx *first = ready_lastpos (&ready);
2600       for (i = 0; i < ready.n_ready; i++)
2601 	FOR_EACH_DEP (first[i], SD_LIST_BACK, sd_it, dep)
2602 	  if (!DEBUG_INSN_P (DEP_PRO (dep)))
2603 	    {
2604 	      if (set)
2605 		DEP_STATUS (dep) |= DEP_CANCELLED;
2606 	      else
2607 		DEP_STATUS (dep) &= ~DEP_CANCELLED;
2608 	    }
2609     }
2610   for (i = 0; i <= max_insn_queue_index; i++)
2611     {
2612       int q = NEXT_Q_AFTER (q_ptr, i);
2613       rtx link;
2614       for (link = insn_queue[q]; link; link = XEXP (link, 1))
2615 	{
2616 	  rtx insn = XEXP (link, 0);
2617 	  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
2618 	    if (!DEBUG_INSN_P (DEP_PRO (dep)))
2619 	      {
2620 		if (set)
2621 		  DEP_STATUS (dep) |= DEP_CANCELLED;
2622 		else
2623 		  DEP_STATUS (dep) &= ~DEP_CANCELLED;
2624 	      }
2625 	}
2626     }
2627 }
2628 
2629 /* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
2630    Restore their dependencies to an unresolved state, and mark them as
2631    queued nowhere.  */
2632 
2633 static void
2634 unschedule_insns_until (rtx insn)
2635 {
2636   VEC (rtx, heap) *recompute_vec;
2637 
2638   recompute_vec = VEC_alloc (rtx, heap, 0);
2639 
2640   /* Make two passes over the insns to be unscheduled.  First, we clear out
2641      dependencies and other trivial bookkeeping.  */
2642   for (;;)
2643     {
2644       rtx last;
2645       sd_iterator_def sd_it;
2646       dep_t dep;
2647 
2648       last = VEC_pop (rtx, scheduled_insns);
2649 
2650       /* This will be changed by restore_backtrack_point if the insn is in
2651 	 any queue.  */
2652       QUEUE_INDEX (last) = QUEUE_NOWHERE;
2653       if (last != insn)
2654 	INSN_TICK (last) = INVALID_TICK;
2655 
2656       if (modulo_ii > 0 && INSN_UID (last) < modulo_iter0_max_uid)
2657 	modulo_insns_scheduled--;
2658 
2659       for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
2660 	   sd_iterator_cond (&sd_it, &dep);)
2661 	{
2662 	  rtx con = DEP_CON (dep);
2663 	  sd_unresolve_dep (sd_it);
2664 	  if (!MUST_RECOMPUTE_SPEC_P (con))
2665 	    {
2666 	      MUST_RECOMPUTE_SPEC_P (con) = 1;
2667 	      VEC_safe_push (rtx, heap, recompute_vec, con);
2668 	    }
2669 	}
2670 
2671       if (last == insn)
2672 	break;
2673     }
2674 
2675   /* A second pass, to update ready and speculation status for insns
2676      depending on the unscheduled ones.  The first pass must have
2677      popped the scheduled_insns vector up to the point where we
2678      restart scheduling, as recompute_todo_spec requires it to be
2679      up-to-date.  */
2680   while (!VEC_empty (rtx, recompute_vec))
2681     {
2682       rtx con;
2683 
2684       con = VEC_pop (rtx, recompute_vec);
2685       MUST_RECOMPUTE_SPEC_P (con) = 0;
2686       if (!sd_lists_empty_p (con, SD_LIST_HARD_BACK))
2687 	{
2688 	  TODO_SPEC (con) = HARD_DEP;
2689 	  INSN_TICK (con) = INVALID_TICK;
2690 	  if (PREDICATED_PAT (con) != NULL_RTX)
2691 	    haifa_change_pattern (con, ORIG_PAT (con));
2692 	}
2693       else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
2694 	TODO_SPEC (con) = recompute_todo_spec (con);
2695     }
2696   VEC_free (rtx, heap, recompute_vec);
2697 }
2698 
2699 /* Restore scheduler state from the topmost entry on the backtracking queue.
2700    PSCHED_BLOCK_P points to the local data of schedule_block that we must
2701    overwrite with the saved data.
2702    The caller must already have called unschedule_insns_until.  */
2703 
2704 static void
2705 restore_last_backtrack_point (struct sched_block_state *psched_block)
2706 {
2707   rtx link;
2708   int i;
2709   struct haifa_saved_data *save = backtrack_queue;
2710 
2711   backtrack_queue = save->next;
2712 
2713   if (current_sched_info->restore_state)
2714     (*current_sched_info->restore_state) (save->fe_saved_data);
2715 
2716   if (targetm.sched.alloc_sched_context)
2717     {
2718       targetm.sched.set_sched_context (save->be_saved_data);
2719       targetm.sched.free_sched_context (save->be_saved_data);
2720     }
2721 
2722   /* Clear the QUEUE_INDEX of everything in the ready list or one
2723      of the queues.  */
2724   if (ready.n_ready > 0)
2725     {
2726       rtx *first = ready_lastpos (&ready);
2727       for (i = 0; i < ready.n_ready; i++)
2728 	{
2729 	  rtx insn = first[i];
2730 	  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
2731 	  INSN_TICK (insn) = INVALID_TICK;
2732 	}
2733     }
2734   for (i = 0; i <= max_insn_queue_index; i++)
2735     {
2736       int q = NEXT_Q_AFTER (q_ptr, i);
2737 
2738       for (link = insn_queue[q]; link; link = XEXP (link, 1))
2739 	{
2740 	  rtx x = XEXP (link, 0);
2741 	  QUEUE_INDEX (x) = QUEUE_NOWHERE;
2742 	  INSN_TICK (x) = INVALID_TICK;
2743 	}
2744       free_INSN_LIST_list (&insn_queue[q]);
2745     }
2746 
2747   free (ready.vec);
2748   ready = save->ready;
2749 
2750   if (ready.n_ready > 0)
2751     {
2752       rtx *first = ready_lastpos (&ready);
2753       for (i = 0; i < ready.n_ready; i++)
2754 	{
2755 	  rtx insn = first[i];
2756 	  QUEUE_INDEX (insn) = QUEUE_READY;
2757 	  TODO_SPEC (insn) = recompute_todo_spec (insn);
2758 	  INSN_TICK (insn) = save->clock_var;
2759 	}
2760     }
2761 
2762   q_ptr = 0;
2763   q_size = save->q_size;
2764   for (i = 0; i <= max_insn_queue_index; i++)
2765     {
2766       int q = NEXT_Q_AFTER (q_ptr, i);
2767 
2768       insn_queue[q] = save->insn_queue[q];
2769 
2770       for (link = insn_queue[q]; link; link = XEXP (link, 1))
2771 	{
2772 	  rtx x = XEXP (link, 0);
2773 	  QUEUE_INDEX (x) = i;
2774 	  TODO_SPEC (x) = recompute_todo_spec (x);
2775 	  INSN_TICK (x) = save->clock_var + i;
2776 	}
2777     }
2778   free (save->insn_queue);
2779 
2780   toggle_cancelled_flags (true);
2781 
2782   clock_var = save->clock_var;
2783   last_clock_var = save->last_clock_var;
2784   cycle_issued_insns = save->cycle_issued_insns;
2785   last_scheduled_insn = save->last_scheduled_insn;
2786   last_nondebug_scheduled_insn = save->last_nondebug_scheduled_insn;
2787 
2788   *psched_block = save->sched_block;
2789 
2790   memcpy (curr_state, save->curr_state, dfa_state_size);
2791   free (save->curr_state);
2792 
2793   mark_backtrack_feeds (save->delay_pair->i2, 0);
2794 
2795   free (save);
2796 
2797   for (save = backtrack_queue; save; save = save->next)
2798     {
2799       mark_backtrack_feeds (save->delay_pair->i2, 1);
2800     }
2801 }
2802 
2803 /* Discard all data associated with the topmost entry in the backtrack
2804    queue.  If RESET_TICK is false, we just want to free the data.  If true,
2805    we are doing this because we discovered a reason to backtrack.  In the
2806    latter case, also reset the INSN_TICK for the shadow insn.  */
2807 static void
2808 free_topmost_backtrack_point (bool reset_tick)
2809 {
2810   struct haifa_saved_data *save = backtrack_queue;
2811   int i;
2812 
2813   backtrack_queue = save->next;
2814 
2815   if (reset_tick)
2816     {
2817       struct delay_pair *pair = save->delay_pair;
2818       while (pair)
2819 	{
2820 	  INSN_TICK (pair->i2) = INVALID_TICK;
2821 	  INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
2822 	  pair = pair->next_same_i1;
2823 	}
2824     }
2825   if (targetm.sched.free_sched_context)
2826     targetm.sched.free_sched_context (save->be_saved_data);
2827   if (current_sched_info->restore_state)
2828     free (save->fe_saved_data);
2829   for (i = 0; i <= max_insn_queue_index; i++)
2830     free_INSN_LIST_list (&save->insn_queue[i]);
2831   free (save->insn_queue);
2832   free (save->curr_state);
2833   free (save->ready.vec);
2834   free (save);
2835 }
2836 
2837 /* Free the entire backtrack queue.  */
2838 static void
2839 free_backtrack_queue (void)
2840 {
2841   while (backtrack_queue)
2842     free_topmost_backtrack_point (false);
2843 }
2844 
/* Compute INSN_TICK_ESTIMATE for INSN.  PROCESSED is a bitmap of
   instructions we've previously encountered; a set bit prevents
   recursion.  BUDGET is a limit on how far ahead we look; it is
   reduced on recursive calls.  Return true if we produced a good
   estimate, or false if we exceeded the budget.  */
2850 static bool
2851 estimate_insn_tick (bitmap processed, rtx insn, int budget)
2852 {
2853   sd_iterator_def sd_it;
2854   dep_t dep;
2855   int earliest = INSN_TICK (insn);
2856 
2857   FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
2858     {
2859       rtx pro = DEP_PRO (dep);
2860       int t;
2861 
2862       if (DEP_STATUS (dep) & DEP_CANCELLED)
2863 	continue;
2864 
2865       if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
2866 	gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
2867       else
2868 	{
2869 	  int cost = dep_cost (dep);
2870 	  if (cost >= budget)
2871 	    return false;
2872 	  if (!bitmap_bit_p (processed, INSN_LUID (pro)))
2873 	    {
2874 	      if (!estimate_insn_tick (processed, pro, budget - cost))
2875 		return false;
2876 	    }
2877 	  gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
2878 	  t = INSN_TICK_ESTIMATE (pro) + cost;
2879 	  if (earliest == INVALID_TICK || t > earliest)
2880 	    earliest = t;
2881 	}
2882     }
2883   bitmap_set_bit (processed, INSN_LUID (insn));
2884   INSN_TICK_ESTIMATE (insn) = earliest;
2885   return true;
2886 }
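
/* A standalone sketch of the budgeted estimate above: a node's
   earliest tick is the maximum over its predecessors of their estimate
   plus the edge cost, memoized through a PROCESSED flag, and the walk
   gives up as soon as the remaining BUDGET cannot cover an edge.  The
   toy graph representation is illustrative only; kept under "#if 0" so
   it is never compiled.  */
#if 0
#include <stdbool.h>

struct toy_node
{
  int n_preds;
  struct toy_node **preds;
  int *costs;			/* Cost of each incoming edge.  */
  int estimate;			/* Computed earliest tick.  */
  bool processed;
};

static bool
toy_estimate_tick (struct toy_node *node, int budget)
{
  int i, earliest = 0;

  for (i = 0; i < node->n_preds; i++)
    {
      struct toy_node *pro = node->preds[i];
      int cost = node->costs[i], t;

      if (cost >= budget)
	return false;
      if (!pro->processed && !toy_estimate_tick (pro, budget - cost))
	return false;
      t = pro->estimate + cost;
      if (t > earliest)
	earliest = t;
    }
  node->processed = true;
  node->estimate = earliest;
  return true;
}
#endif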
2887 
2888 /* Examine the pair of insns in P, and estimate (optimistically, assuming
2889    infinite resources) the cycle in which the delayed shadow can be issued.
2890    Return the number of cycles that must pass before the real insn can be
2891    issued in order to meet this constraint.  */
2892 static int
2893 estimate_shadow_tick (struct delay_pair *p)
2894 {
2895   bitmap_head processed;
2896   int t;
2897   bool cutoff;
2898   bitmap_initialize (&processed, 0);
2899 
2900   cutoff = !estimate_insn_tick (&processed, p->i2,
2901 				max_insn_queue_index + pair_delay (p));
2902   bitmap_clear (&processed);
2903   if (cutoff)
2904     return max_insn_queue_index;
2905   t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
2906   if (t > 0)
2907     return t;
2908   return 0;
2909 }
2910 
2911 /* If INSN has no unresolved backwards dependencies, add it to the schedule and
2912    recursively resolve all its forward dependencies.  */
2913 static void
2914 resolve_dependencies (rtx insn)
2915 {
2916   sd_iterator_def sd_it;
2917   dep_t dep;
2918 
2919   /* Don't use sd_lists_empty_p; it ignores debug insns.  */
2920   if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (insn)) != NULL
2921       || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (insn)) != NULL)
2922     return;
2923 
2924   if (sched_verbose >= 4)
2925     fprintf (sched_dump, ";;\tquickly resolving %d\n", INSN_UID (insn));
2926 
2927   if (QUEUE_INDEX (insn) >= 0)
2928     queue_remove (insn);
2929 
2930   VEC_safe_push (rtx, heap, scheduled_insns, insn);
2931 
2932   /* Update dependent instructions.  */
2933   for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
2934        sd_iterator_cond (&sd_it, &dep);)
2935     {
2936       rtx next = DEP_CON (dep);
2937 
2938       if (sched_verbose >= 4)
2939 	fprintf (sched_dump, ";;\t\tdep %d against %d\n", INSN_UID (insn),
2940 		 INSN_UID (next));
2941 
      /* Resolve the dependence between INSN and NEXT.
	 sd_resolve_dep () moves the current dep to another list,
	 thus advancing the iterator.  */
2945       sd_resolve_dep (sd_it);
2946 
2947       if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
2948 	{
2949 	  resolve_dependencies (next);
2950 	}
2951       else
	/* A check insn always has only one forward dependence (to the first
	   insn in the recovery block); therefore, this will be executed only
	   once.  */
2954 	{
2955 	  gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
2956 	}
2957     }
2958 }
2959 
2960 
/* Return, in *HEADP and *TAILP, the head and tail pointers of the ebb
   starting at BEG and ending at END.  */
2963 void
2964 get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
2965 {
2966   rtx beg_head = BB_HEAD (beg);
2967   rtx beg_tail = BB_END (beg);
2968   rtx end_head = BB_HEAD (end);
2969   rtx end_tail = BB_END (end);
2970 
  /* Don't include any notes or labels at the beginning of the BEG
     basic block, or notes at the end of the END basic block.  */
2973 
2974   if (LABEL_P (beg_head))
2975     beg_head = NEXT_INSN (beg_head);
2976 
2977   while (beg_head != beg_tail)
2978     if (NOTE_P (beg_head))
2979       beg_head = NEXT_INSN (beg_head);
2980     else if (DEBUG_INSN_P (beg_head))
2981       {
2982 	rtx note, next;
2983 
2984 	for (note = NEXT_INSN (beg_head);
2985 	     note != beg_tail;
2986 	     note = next)
2987 	  {
2988 	    next = NEXT_INSN (note);
2989 	    if (NOTE_P (note))
2990 	      {
2991 		if (sched_verbose >= 9)
2992 		  fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
2993 
2994 		reorder_insns_nobb (note, note, PREV_INSN (beg_head));
2995 
2996 		if (BLOCK_FOR_INSN (note) != beg)
2997 		  df_insn_change_bb (note, beg);
2998 	      }
2999 	    else if (!DEBUG_INSN_P (note))
3000 	      break;
3001 	  }
3002 
3003 	break;
3004       }
3005     else
3006       break;
3007 
3008   *headp = beg_head;
3009 
3010   if (beg == end)
3011     end_head = beg_head;
3012   else if (LABEL_P (end_head))
3013     end_head = NEXT_INSN (end_head);
3014 
3015   while (end_head != end_tail)
3016     if (NOTE_P (end_tail))
3017       end_tail = PREV_INSN (end_tail);
3018     else if (DEBUG_INSN_P (end_tail))
3019       {
3020 	rtx note, prev;
3021 
3022 	for (note = PREV_INSN (end_tail);
3023 	     note != end_head;
3024 	     note = prev)
3025 	  {
3026 	    prev = PREV_INSN (note);
3027 	    if (NOTE_P (note))
3028 	      {
3029 		if (sched_verbose >= 9)
3030 		  fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
3031 
3032 		reorder_insns_nobb (note, note, end_tail);
3033 
3034 		if (end_tail == BB_END (end))
3035 		  BB_END (end) = note;
3036 
3037 		if (BLOCK_FOR_INSN (note) != end)
3038 		  df_insn_change_bb (note, end);
3039 	      }
3040 	    else if (!DEBUG_INSN_P (note))
3041 	      break;
3042 	  }
3043 
3044 	break;
3045       }
3046     else
3047       break;
3048 
3049   *tailp = end_tail;
3050 }
3051 
3052 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */
3053 
3054 int
3055 no_real_insns_p (const_rtx head, const_rtx tail)
3056 {
3057   while (head != NEXT_INSN (tail))
3058     {
3059       if (!NOTE_P (head) && !LABEL_P (head))
3060 	return 0;
3061       head = NEXT_INSN (head);
3062     }
3063   return 1;
3064 }
3065 
3066 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
3067    previously found among the insns.  Insert them just before HEAD.  */
3068 rtx
3069 restore_other_notes (rtx head, basic_block head_bb)
3070 {
3071   if (note_list != 0)
3072     {
3073       rtx note_head = note_list;
3074 
3075       if (head)
3076 	head_bb = BLOCK_FOR_INSN (head);
3077       else
3078 	head = NEXT_INSN (bb_note (head_bb));
3079 
3080       while (PREV_INSN (note_head))
3081 	{
3082 	  set_block_for_insn (note_head, head_bb);
3083 	  note_head = PREV_INSN (note_head);
3084 	}
      /* The loop above missed the first note in the chain.  */
3086       set_block_for_insn (note_head, head_bb);
3087 
3088       PREV_INSN (note_head) = PREV_INSN (head);
3089       NEXT_INSN (PREV_INSN (head)) = note_head;
3090       PREV_INSN (head) = note_list;
3091       NEXT_INSN (note_list) = head;
3092 
3093       if (BLOCK_FOR_INSN (head) != head_bb)
3094 	BB_END (head_bb) = note_list;
3095 
3096       head = note_head;
3097     }
3098 
3099   return head;
3100 }
3101 
/* Move insns that have become ready to fire from the queue to the
   ready list.  */
3103 
3104 static void
3105 queue_to_ready (struct ready_list *ready)
3106 {
3107   rtx insn;
3108   rtx link;
3109   rtx skip_insn;
3110 
3111   q_ptr = NEXT_Q (q_ptr);
3112 
3113   if (dbg_cnt (sched_insn) == false)
3114     {
      /* If the debug counter is activated, do not requeue the first
	 nonscheduled insn.  */
3117       skip_insn = nonscheduled_insns_begin;
3118       do
3119 	{
3120 	  skip_insn = next_nonnote_nondebug_insn (skip_insn);
3121 	}
3122       while (QUEUE_INDEX (skip_insn) == QUEUE_SCHEDULED);
3123     }
3124   else
3125     skip_insn = NULL_RTX;
3126 
3127   /* Add all pending insns that can be scheduled without stalls to the
3128      ready list.  */
3129   for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
3130     {
3131       insn = XEXP (link, 0);
3132       q_size -= 1;
3133 
3134       if (sched_verbose >= 2)
3135 	fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
3136 		 (*current_sched_info->print_insn) (insn, 0));
3137 
3138       /* If the ready list is full, delay the insn for 1 cycle.
3139 	 See the comment in schedule_block for the rationale.  */
3140       if (!reload_completed
3141 	  && ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
3142 	  && !SCHED_GROUP_P (insn)
3143 	  && insn != skip_insn)
3144 	queue_insn (insn, 1, "ready full");
3145       else
3146 	{
3147 	  ready_add (ready, insn, false);
3148 	  if (sched_verbose >= 2)
3149 	    fprintf (sched_dump, "moving to ready without stalls\n");
3150         }
3151     }
3152   free_INSN_LIST_list (&insn_queue[q_ptr]);
3153 
3154   /* If there are no ready insns, stall until one is ready and add all
3155      of the pending insns at that point to the ready list.  */
3156   if (ready->n_ready == 0)
3157     {
3158       int stalls;
3159 
3160       for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
3161 	{
3162 	  if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
3163 	    {
3164 	      for (; link; link = XEXP (link, 1))
3165 		{
3166 		  insn = XEXP (link, 0);
3167 		  q_size -= 1;
3168 
3169 		  if (sched_verbose >= 2)
3170 		    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
3171 			     (*current_sched_info->print_insn) (insn, 0));
3172 
3173 		  ready_add (ready, insn, false);
3174 		  if (sched_verbose >= 2)
3175 		    fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
3176 		}
3177 	      free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
3178 
3179 	      advance_one_cycle ();
3180 
3181 	      break;
3182 	    }
3183 
3184 	  advance_one_cycle ();
3185 	}
3186 
3187       q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
3188       clock_var += stalls;
3189     }
3190 }
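
/* A standalone sketch of the stall scan above: when nothing is ready,
   walk forward through the future queue buckets until a non-empty one
   is found, charging the skipped cycles to the clock.  It assumes, as
   the scheduler can, that at least one bucket is occupied.  The toy_*
   names are illustrative; kept under "#if 0" so it is never
   compiled.  */
#if 0
static int
toy_stall_until_ready (void **queue, int qlen, int q_ptr, int *clock)
{
  int stalls;

  for (stalls = 1; stalls <= qlen; stalls++)
    if (queue[(q_ptr + stalls) % qlen] != NULL)
      break;

  *clock += stalls;
  return (q_ptr + stalls) % qlen;	/* The new current bucket.  */
}
#endif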
3191 
3192 /* Used by early_queue_to_ready.  Determines whether it is "ok" to
3193    prematurely move INSN from the queue to the ready list.  Currently,
3194    if a target defines the hook 'is_costly_dependence', this function
3195    uses the hook to check whether there exist any dependences which are
3196    considered costly by the target, between INSN and other insns that
3197    have already been scheduled.  Dependences are checked up to Y cycles
   back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
   controlling this value.
   (Other considerations could be taken into account instead, or in
   addition, depending on user flags and target hooks.)  */
3202 
3203 static bool
3204 ok_for_early_queue_removal (rtx insn)
3205 {
3206   if (targetm.sched.is_costly_dependence)
3207     {
3208       rtx prev_insn;
3209       int n_cycles;
3210       int i = VEC_length (rtx, scheduled_insns);
3211       for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
3212 	{
3213 	  while (i-- > 0)
3214 	    {
3215 	      int cost;
3216 
3217 	      prev_insn = VEC_index (rtx, scheduled_insns, i);
3218 
3219 	      if (!NOTE_P (prev_insn))
3220 		{
3221 		  dep_t dep;
3222 
3223 		  dep = sd_find_dep_between (prev_insn, insn, true);
3224 
3225 		  if (dep != NULL)
3226 		    {
3227 		      cost = dep_cost (dep);
3228 
3229 		      if (targetm.sched.is_costly_dependence (dep, cost,
3230 				flag_sched_stalled_insns_dep - n_cycles))
3231 			return false;
3232 		    }
3233 		}
3234 
3235 	      if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
3236 		break;
3237 	    }
3238 
3239 	  if (i == 0)
3240 	    break;
3241 	}
3242     }
3243 
3244   return true;
3245 }
3246 
3247 
3248 /* Remove insns from the queue, before they become "ready" with respect
3249    to FU latency considerations.  */
3250 
3251 static int
3252 early_queue_to_ready (state_t state, struct ready_list *ready)
3253 {
3254   rtx insn;
3255   rtx link;
3256   rtx next_link;
3257   rtx prev_link;
3258   bool move_to_ready;
3259   int cost;
3260   state_t temp_state = alloca (dfa_state_size);
3261   int stalls;
3262   int insns_removed = 0;
3263 
3264   /*
3265      Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
3266      function:
3267 
3268      X == 0: There is no limit on how many queued insns can be removed
3269              prematurely.  (flag_sched_stalled_insns = -1).
3270 
3271      X >= 1: Only X queued insns can be removed prematurely in each
3272 	     invocation.  (flag_sched_stalled_insns = X).
3273 
3274      Otherwise: Early queue removal is disabled.
3275          (flag_sched_stalled_insns = 0)
3276   */
3277 
3278   if (! flag_sched_stalled_insns)
3279     return 0;
3280 
3281   for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
3282     {
3283       if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
3284 	{
3285 	  if (sched_verbose > 6)
3286 	    fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
3287 
3288 	  prev_link = 0;
3289 	  while (link)
3290 	    {
3291 	      next_link = XEXP (link, 1);
3292 	      insn = XEXP (link, 0);
3293 	      if (insn && sched_verbose > 6)
3294 		print_rtl_single (sched_dump, insn);
3295 
3296 	      memcpy (temp_state, state, dfa_state_size);
3297 	      if (recog_memoized (insn) < 0)
		/* Use a non-negative cost to mark the insn as not
		   ready, avoiding an infinite Q->R->Q->R... cycle.  */
3300 		cost = 0;
3301 	      else
3302 		cost = state_transition (temp_state, insn);
3303 
3304 	      if (sched_verbose >= 6)
3305 		fprintf (sched_dump, "transition cost = %d\n", cost);
3306 
3307 	      move_to_ready = false;
3308 	      if (cost < 0)
3309 		{
3310 		  move_to_ready = ok_for_early_queue_removal (insn);
3311 		  if (move_to_ready == true)
3312 		    {
3313 		      /* move from Q to R */
3314 		      q_size -= 1;
3315 		      ready_add (ready, insn, false);
3316 
3317 		      if (prev_link)
3318 			XEXP (prev_link, 1) = next_link;
3319 		      else
3320 			insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
3321 
3322 		      free_INSN_LIST_node (link);
3323 
3324 		      if (sched_verbose >= 2)
3325 			fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
3326 				 (*current_sched_info->print_insn) (insn, 0));
3327 
3328 		      insns_removed++;
3329 		      if (insns_removed == flag_sched_stalled_insns)
3330 			/* Remove no more than flag_sched_stalled_insns insns
3331 			   from Q at a time.  */
3332 			return insns_removed;
3333 		    }
3334 		}
3335 
3336 	      if (move_to_ready == false)
3337 		prev_link = link;
3338 
3339 	      link = next_link;
3340 	    } /* while link */
3341 	} /* if link */
3342 
3343     } /* for stalls.. */
3344 
3345   return insns_removed;
3346 }
3347 
3348 
3349 /* Print the ready list for debugging purposes.  Callable from debugger.  */
3350 
3351 static void
3352 debug_ready_list (struct ready_list *ready)
3353 {
3354   rtx *p;
3355   int i;
3356 
3357   if (ready->n_ready == 0)
3358     {
3359       fprintf (sched_dump, "\n");
3360       return;
3361     }
3362 
3363   p = ready_lastpos (ready);
3364   for (i = 0; i < ready->n_ready; i++)
3365     {
3366       fprintf (sched_dump, "  %s:%d",
3367 	       (*current_sched_info->print_insn) (p[i], 0),
3368 	       INSN_LUID (p[i]));
3369       if (sched_pressure_p)
3370 	fprintf (sched_dump, "(cost=%d",
3371 		 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
3372       if (INSN_TICK (p[i]) > clock_var)
3373 	fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
3374       if (sched_pressure_p)
3375 	fprintf (sched_dump, ")");
3376     }
3377   fprintf (sched_dump, "\n");
3378 }
3379 
3380 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
3381    NOTEs.  This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
3382    replaces the epilogue note in the correct basic block.  */
3383 void
3384 reemit_notes (rtx insn)
3385 {
3386   rtx note, last = insn;
3387 
3388   for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
3389     {
3390       if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
3391 	{
3392 	  enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
3393 
3394 	  last = emit_note_before (note_type, last);
3395 	  remove_note (insn, note);
3396 	}
3397     }
3398 }
3399 
3400 /* Move INSN.  Reemit notes if needed.  Update CFG, if needed.  */
3401 static void
3402 move_insn (rtx insn, rtx last, rtx nt)
3403 {
3404   if (PREV_INSN (insn) != last)
3405     {
3406       basic_block bb;
3407       rtx note;
3408       int jump_p = 0;
3409 
3410       bb = BLOCK_FOR_INSN (insn);
3411 
3412       /* BB_HEAD is either LABEL or NOTE.  */
3413       gcc_assert (BB_HEAD (bb) != insn);
3414 
3415       if (BB_END (bb) == insn)
3416 	/* If this is the last instruction in BB, move the end marker one
3417 	   instruction up.  */
3418 	{
3419 	  /* Jumps are always placed at the end of a basic block.  */
3420 	  jump_p = control_flow_insn_p (insn);
3421 
3422 	  gcc_assert (!jump_p
3423 		      || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
3424 			  && IS_SPECULATION_BRANCHY_CHECK_P (insn))
3425 		      || (common_sched_info->sched_pass_id
3426 			  == SCHED_EBB_PASS));
3427 
3428 	  gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
3429 
3430 	  BB_END (bb) = PREV_INSN (insn);
3431 	}
3432 
3433       gcc_assert (BB_END (bb) != last);
3434 
3435       if (jump_p)
3436 	/* We move the block note along with the jump.  */
3437 	{
3438 	  gcc_assert (nt);
3439 
3440 	  note = NEXT_INSN (insn);
3441 	  while (NOTE_NOT_BB_P (note) && note != nt)
3442 	    note = NEXT_INSN (note);
3443 
3444 	  if (note != nt
3445 	      && (LABEL_P (note)
3446 		  || BARRIER_P (note)))
3447 	    note = NEXT_INSN (note);
3448 
3449 	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
3450 	}
3451       else
3452 	note = insn;
3453 
3454       NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
3455       PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
3456 
3457       NEXT_INSN (note) = NEXT_INSN (last);
3458       PREV_INSN (NEXT_INSN (last)) = note;
3459 
3460       NEXT_INSN (last) = insn;
3461       PREV_INSN (insn) = last;
3462 
3463       bb = BLOCK_FOR_INSN (last);
3464 
3465       if (jump_p)
3466 	{
3467 	  fix_jump_move (insn);
3468 
3469 	  if (BLOCK_FOR_INSN (insn) != bb)
3470 	    move_block_after_check (insn);
3471 
3472 	  gcc_assert (BB_END (bb) == last);
3473 	}
3474 
3475       df_insn_change_bb (insn, bb);
3476 
3477       /* Update BB_END, if needed.  */
3478       if (BB_END (bb) == last)
3479 	BB_END (bb) = insn;
3480     }
3481 
3482   SCHED_GROUP_P (insn) = 0;
3483 }
3484 
3485 /* Return true if scheduling INSN will finish current clock cycle.  */
3486 static bool
3487 insn_finishes_cycle_p (rtx insn)
3488 {
3489   if (SCHED_GROUP_P (insn))
3490     /* After issuing INSN, the rest of the sched_group will be forced to
3491        issue in order.  Don't make any plans for the rest of the cycle.  */
3492     return true;
3493 
3494   /* Finishing the block will, apparently, finish the cycle.  */
3495   if (current_sched_info->insn_finishes_block_p
3496       && current_sched_info->insn_finishes_block_p (insn))
3497     return true;
3498 
3499   return false;
3500 }
3501 
3502 /* Define type for target data used in multipass scheduling.  */
3503 #ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
3504 # define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
3505 #endif
3506 typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;
3507 
3508 /* The following structure describes an entry of the stack of choices.  */
3509 struct choice_entry
3510 {
3511   /* Ordinal number of the issued insn in the ready queue.  */
3512   int index;
3513   /* The number of remaining insns whose issue we should still try.  */
3514   int rest;
3515   /* The number of issued essential insns.  */
3516   int n;
3517   /* State after issuing the insn.  */
3518   state_t state;
3519   /* Target-specific data.  */
3520   first_cycle_multipass_data_t target_data;
3521 };
3522 
3523 /* The following array is used to implement a stack of choices used in
3524    function max_issue.  */
3525 static struct choice_entry *choice_stack;
3526 
3527 /* This holds the value of the target dfa_lookahead hook.  */
3528 int dfa_lookahead;
3529 
3530 /* The following variable is the maximal number of tries of issuing
3531    insns for the first-cycle multipass insn scheduling.  We define
3532    it as constant * (DFA_LOOKAHEAD ** ISSUE_RATE).  We would not
3533    need this constraint if all real insns (those with non-negative
3534    codes) had reservations, because then the algorithm complexity
3535    is O(DFA_LOOKAHEAD ** ISSUE_RATE).  Unfortunately, the DFA
3536    descriptions might be incomplete, and such insns can occur.  For
3537    such descriptions, the complexity of the algorithm (without the
3538    constraint) could reach DFA_LOOKAHEAD ** N, where N is the queue length.  */
3539 static int max_lookahead_tries;
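/* A worked example (with illustrative numbers): for DFA_LOOKAHEAD == 4 and
   ISSUE_RATE == 2, the initialization in max_issue below computes
   100 * 4 * 4 == 1600, so at most 1600 issue attempts are made before the
   search is cut off.  */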
3540 
3541 /* The following value is the value of the hook
3542    `first_cycle_multipass_dfa_lookahead' at the last call of
3543    `max_issue'.  */
3544 static int cached_first_cycle_multipass_dfa_lookahead = 0;
3545 
3546 /* The following value is the value of `issue_rate' at the last call of
3547    `sched_init'.  */
3548 static int cached_issue_rate = 0;
3549 
3550 /* The following function returns the maximal (or close to maximal)
3551    number of insns which can be issued on the same cycle, one of
3552    which is the insn with the best rank (the first insn in READY).
3553    To do this, the function tries different samples of ready insns.
3554    READY is the current queue `ready'.  The global array READY_TRY
3555    reflects which insns are already issued in this try.  The function
3556    stops immediately if it reaches a solution in which all insns can
3557    be issued.  INDEX will contain the index of the best insn in READY.
3558    This function is used only for first-cycle multipass scheduling.
3559 
3560    PRIVILEGED_N >= 0.
3561 
3562    This function expects recognized insns only.  All USEs,
3563    CLOBBERs, etc. must be filtered elsewhere.  */
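/* In outline, the search below proceeds roughly as follows (an informal
   sketch, not a literal transcription of the code):

     push the initial DFA state on CHOICE_STACK;
     repeat:
       if the current prefix cannot be extended (no tries left, no more
       candidates, or the issue limit reached), record it as the best
       solution so far (provided no insn is privileged, or a privileged
       insn was issued) and backtrack;
       otherwise issue the next ready insn the DFA accepts this cycle,
       push the resulting state, and continue;
     until the stack is empty or MAX_LOOKAHEAD_TRIES is exhausted.  */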
3564 int
3565 max_issue (struct ready_list *ready, int privileged_n, state_t state,
3566 	   bool first_cycle_insn_p, int *index)
3567 {
3568   int n, i, all, n_ready, best, delay, tries_num;
3569   int more_issue;
3570   struct choice_entry *top;
3571   rtx insn;
3572 
3573   n_ready = ready->n_ready;
3574   gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
3575 	      && privileged_n <= n_ready);
3576 
3577   /* Init MAX_LOOKAHEAD_TRIES.  */
3578   if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
3579     {
3580       cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
3581       max_lookahead_tries = 100;
3582       for (i = 0; i < issue_rate; i++)
3583 	max_lookahead_tries *= dfa_lookahead;
3584     }
3585 
3586   /* Init MORE_ISSUE.  */
3587   more_issue = issue_rate - cycle_issued_insns;
3588   gcc_assert (more_issue >= 0);
3589 
3590   /* The number of the issued insns in the best solution.  */
3591   best = 0;
3592 
3593   top = choice_stack;
3594 
3595   /* Set initial state of the search.  */
3596   memcpy (top->state, state, dfa_state_size);
3597   top->rest = dfa_lookahead;
3598   top->n = 0;
3599   if (targetm.sched.first_cycle_multipass_begin)
3600     targetm.sched.first_cycle_multipass_begin (&top->target_data,
3601 					       ready_try, n_ready,
3602 					       first_cycle_insn_p);
3603 
3604   /* Count the number of insns to search among.  */
3605   for (all = i = 0; i < n_ready; i++)
3606     if (!ready_try [i])
3607       all++;
3608 
3609   /* I is the index of the insn to try next.  */
3610   i = 0;
3611   tries_num = 0;
3612   for (;;)
3613     {
3614       if (/* If we've reached a dead end or searched enough of what we have
3615 	     been asked...  */
3616 	  top->rest == 0
3617 	  /* or have nothing else to try...  */
3618 	  || i >= n_ready
3619 	  /* or should not issue more.  */
3620 	  || top->n >= more_issue)
3621 	{
3622 	  /* ??? (... || i == n_ready).  */
3623 	  gcc_assert (i <= n_ready);
3624 
3625 	  /* We should not issue more than issue_rate instructions.  */
3626 	  gcc_assert (top->n <= more_issue);
3627 
3628 	  if (top == choice_stack)
3629 	    break;
3630 
3631 	  if (best < top - choice_stack)
3632 	    {
3633 	      if (privileged_n)
3634 		{
3635 		  n = privileged_n;
3636 		  /* Try to find issued privileged insn.  */
3637 		  while (n && !ready_try[--n])
3638 		    ;
3639 		}
3640 
3641 	      if (/* If all insns are equally good...  */
3642 		  privileged_n == 0
3643 		  /* Or a privileged insn will be issued.  */
3644 		  || ready_try[n])
3645 		/* Then we have a solution.  */
3646 		{
3647 		  best = top - choice_stack;
3648 		  /* This is the index of the insn issued first in this
3649 		     solution.  */
3650 		  *index = choice_stack [1].index;
3651 		  if (top->n == more_issue || best == all)
3652 		    break;
3653 		}
3654 	    }
3655 
3656 	  /* Set ready-list index to point to the last insn
3657 	     ('i++' below will advance it to the next insn).  */
3658 	  i = top->index;
3659 
3660 	  /* Backtrack.  */
3661 	  ready_try [i] = 0;
3662 
3663 	  if (targetm.sched.first_cycle_multipass_backtrack)
3664 	    targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
3665 							   ready_try, n_ready);
3666 
3667 	  top--;
3668 	  memcpy (state, top->state, dfa_state_size);
3669 	}
3670       else if (!ready_try [i])
3671 	{
3672 	  tries_num++;
3673 	  if (tries_num > max_lookahead_tries)
3674 	    break;
3675 	  insn = ready_element (ready, i);
3676 	  delay = state_transition (state, insn);
3677 	  if (delay < 0)
3678 	    {
3679 	      if (state_dead_lock_p (state)
3680 		  || insn_finishes_cycle_p (insn))
3681 		/* We won't issue any more instructions in the next
3682 		   choice_state.  */
3683 		top->rest = 0;
3684 	      else
3685 		top->rest--;
3686 
3687 	      n = top->n;
3688 	      if (memcmp (top->state, state, dfa_state_size) != 0)
3689 		n++;
3690 
3691 	      /* Advance to the next choice_entry.  */
3692 	      top++;
3693 	      /* Initialize it.  */
3694 	      top->rest = dfa_lookahead;
3695 	      top->index = i;
3696 	      top->n = n;
3697 	      memcpy (top->state, state, dfa_state_size);
3698 	      ready_try [i] = 1;
3699 
3700 	      if (targetm.sched.first_cycle_multipass_issue)
3701 		targetm.sched.first_cycle_multipass_issue (&top->target_data,
3702 							   ready_try, n_ready,
3703 							   insn,
3704 							   &((top - 1)
3705 							     ->target_data));
3706 
3707 	      i = -1;
3708 	    }
3709 	}
3710 
3711       /* Increase ready-list index.  */
3712       i++;
3713     }
3714 
3715   if (targetm.sched.first_cycle_multipass_end)
3716     targetm.sched.first_cycle_multipass_end (best != 0
3717 					     ? &choice_stack[1].target_data
3718 					     : NULL);
3719 
3720   /* Restore the original state of the DFA.  */
3721   memcpy (state, choice_stack->state, dfa_state_size);
3722 
3723   return best;
3724 }
3725 
3726 /* The following function chooses an insn from READY and modifies
3727    READY.  It is used only for first-cycle multipass
3728    scheduling.
3729    Return:
3730    -1 if the cycle should be advanced,
3731    0 if INSN_PTR is set to point to the desirable insn,
3732    1 if choose_ready () should be restarted without advancing the cycle.  */
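/* A typical caller loop (a minimal sketch; schedule_block below does the
   same with considerably more bookkeeping):

     rtx insn = NULL_RTX;
     int res = choose_ready (&ready, first_cycle_insn_p, &insn);
     if (res < 0)
       break;                        (finish the current cycle)
     else if (res > 0)
       goto restart_choose_ready;    (retry without advancing the cycle)
     else
       issue INSN;                   (res == 0, INSN is valid)  */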
3733 static int
3734 choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
3735 	      rtx *insn_ptr)
3736 {
3737   int lookahead;
3738 
3739   if (!dbg_cnt (sched_insn))
3740     {
3741       rtx insn = nonscheduled_insns_begin;
3742       do
3743 	{
3744 	  insn = next_nonnote_insn (insn);
3745 	}
3746       while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);
3747 
3748       if (QUEUE_INDEX (insn) == QUEUE_READY)
3749 	/* INSN is in the ready_list.  */
3750 	{
3751 	  nonscheduled_insns_begin = insn;
3752 	  ready_remove_insn (insn);
3753 	  *insn_ptr = insn;
3754 	  return 0;
3755 	}
3756 
3757       /* INSN is in the queue.  Advance cycle to move it to the ready list.  */
3758       return -1;
3759     }
3760 
3761   lookahead = 0;
3762 
3763   if (targetm.sched.first_cycle_multipass_dfa_lookahead)
3764     lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
3765   if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
3766       || DEBUG_INSN_P (ready_element (ready, 0)))
3767     {
3768       if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
3769 	*insn_ptr = ready_remove_first_dispatch (ready);
3770       else
3771 	*insn_ptr = ready_remove_first (ready);
3772 
3773       return 0;
3774     }
3775   else
3776     {
3777       /* Try to choose the better insn.  */
3778       int index = 0, i, n;
3779       rtx insn;
3780       int try_data = 1, try_control = 1;
3781       ds_t ts;
3782 
3783       insn = ready_element (ready, 0);
3784       if (INSN_CODE (insn) < 0)
3785 	{
3786 	  *insn_ptr = ready_remove_first (ready);
3787 	  return 0;
3788 	}
3789 
3790       if (spec_info
3791 	  && spec_info->flags & (PREFER_NON_DATA_SPEC
3792 				 | PREFER_NON_CONTROL_SPEC))
3793 	{
3794 	  for (i = 0, n = ready->n_ready; i < n; i++)
3795 	    {
3796 	      rtx x;
3797 	      ds_t s;
3798 
3799 	      x = ready_element (ready, i);
3800 	      s = TODO_SPEC (x);
3801 
3802 	      if (spec_info->flags & PREFER_NON_DATA_SPEC
3803 		  && !(s & DATA_SPEC))
3804 		{
3805 		  try_data = 0;
3806 		  if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
3807 		      || !try_control)
3808 		    break;
3809 		}
3810 
3811 	      if (spec_info->flags & PREFER_NON_CONTROL_SPEC
3812 		  && !(s & CONTROL_SPEC))
3813 		{
3814 		  try_control = 0;
3815 		  if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
3816 		    break;
3817 		}
3818 	    }
3819 	}
3820 
3821       ts = TODO_SPEC (insn);
3822       if ((ts & SPECULATIVE)
3823 	  && (((!try_data && (ts & DATA_SPEC))
3824 	       || (!try_control && (ts & CONTROL_SPEC)))
3825 	      || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
3826 		  && !targetm.sched
3827 		  .first_cycle_multipass_dfa_lookahead_guard_spec (insn))))
3828 	/* Discard the speculative instruction that stands first in the
3829 	   ready list.  */
3830 	{
3831 	  change_queue_index (insn, 1);
3832 	  return 1;
3833 	}
3834 
3835       ready_try[0] = 0;
3836 
3837       for (i = 1; i < ready->n_ready; i++)
3838 	{
3839 	  insn = ready_element (ready, i);
3840 
3841 	  ready_try [i]
3842 	    = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
3843                || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC)));
3844 	}
3845 
3846       /* Let the target filter the search space.  */
3847       for (i = 1; i < ready->n_ready; i++)
3848 	if (!ready_try[i])
3849 	  {
3850 	    insn = ready_element (ready, i);
3851 
3852 	    /* If this insn is recognizable we should have already
3853 	       recognized it earlier.
3854 	       ??? Not very clear where this is supposed to be done.
3855 	       See dep_cost_1.  */
3856 	    gcc_checking_assert (INSN_CODE (insn) >= 0
3857 				 || recog_memoized (insn) < 0);
3858 
3859 	    ready_try [i]
3860 	      = (/* INSN_CODE check can be omitted here as it is also done later
3861 		    in max_issue ().  */
3862 		 INSN_CODE (insn) < 0
3863 		 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
3864 		     && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
3865 		     (insn)));
3866 	  }
3867 
3868       if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
3869 	{
3870 	  *insn_ptr = ready_remove_first (ready);
3871 	  if (sched_verbose >= 4)
3872 	    fprintf (sched_dump, ";;\t\tChosen insn (but can't issue): %s\n",
3873                      (*current_sched_info->print_insn) (*insn_ptr, 0));
3874 	  return 0;
3875 	}
3876       else
3877 	{
3878 	  if (sched_verbose >= 4)
3879 	    fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
3880 		     (*current_sched_info->print_insn)
3881 		     (ready_element (ready, index), 0));
3882 
3883 	  *insn_ptr = ready_remove (ready, index);
3884 	  return 0;
3885 	}
3886     }
3887 }
3888 
3889 /* This function is called when we have successfully scheduled a
3890    block.  It uses the schedule stored in the scheduled_insns vector
3891    to rearrange the RTL.  PREV_HEAD is used as the anchor to which we
3892    append the scheduled insns; TAIL is the insn after the scheduled
3893    block.  TARGET_BB is the argument passed to schedule_block.  */
3894 
3895 static void
3896 commit_schedule (rtx prev_head, rtx tail, basic_block *target_bb)
3897 {
3898   unsigned int i;
3899   rtx insn;
3900 
3901   last_scheduled_insn = prev_head;
3902   for (i = 0;
3903        VEC_iterate (rtx, scheduled_insns, i, insn);
3904        i++)
3905     {
3906       if (control_flow_insn_p (last_scheduled_insn)
3907 	  || current_sched_info->advance_target_bb (*target_bb, insn))
3908 	{
3909 	  *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);
3910 
3911 	  if (sched_verbose)
3912 	    {
3913 	      rtx x;
3914 
3915 	      x = next_real_insn (last_scheduled_insn);
3916 	      gcc_assert (x);
3917 	      dump_new_block_header (1, *target_bb, x, tail);
3918 	    }
3919 
3920 	  last_scheduled_insn = bb_note (*target_bb);
3921 	}
3922 
3923       if (current_sched_info->begin_move_insn)
3924 	(*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
3925       move_insn (insn, last_scheduled_insn,
3926 		 current_sched_info->next_tail);
3927       if (!DEBUG_INSN_P (insn))
3928 	reemit_notes (insn);
3929       last_scheduled_insn = insn;
3930     }
3931 
3932   VEC_truncate (rtx, scheduled_insns, 0);
3933 }
3934 
3935 /* Examine all insns on the ready list and queue those which can't be
3936    issued in this cycle.  TEMP_STATE is temporary scheduler state we
3937    can use as scratch space.  If FIRST_CYCLE_INSN_P is true, no insns
3938    have been issued for the current cycle, which means it is valid to
3939    issue an asm statement.
3940 
3941    If SHADOWS_ONLY_P is true, we eliminate all real insns and only
3942    leave those for which SHADOW_P is true.  If MODULO_EPILOGUE is true,
3943    we only leave insns which have an INSN_EXACT_TICK.  */
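/* Summarizing the checks below (informally): once a SCHED_GROUP insn is
   seen, non-group insns are queued for a cycle ("not in sched group");
   with MODULO_EPILOGUE_P, insns without an exact tick are pushed far back
   ("not an epilogue insn"); with SHADOWS_ONLY_P, real insns are delayed
   ("not a shadow"); asms wait if something already issued this cycle
   ("asm"); otherwise the DFA transition cost and the shadow-tick estimate
   decide.  */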
3944 
3945 static void
3946 prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
3947 		  bool shadows_only_p, bool modulo_epilogue_p)
3948 {
3949   int i;
3950   bool sched_group_found = false;
3951 
3952  restart:
3953   for (i = 0; i < ready.n_ready; i++)
3954     {
3955       rtx insn = ready_element (&ready, i);
3956       int cost = 0;
3957       const char *reason = "resource conflict";
3958 
3959       if (DEBUG_INSN_P (insn))
3960 	continue;
3961 
3962       if (SCHED_GROUP_P (insn) && !sched_group_found)
3963 	{
3964 	  sched_group_found = true;
3965 	  if (i > 0)
3966 	    goto restart;
3967 	}
3968 
3969       if (sched_group_found && !SCHED_GROUP_P (insn))
3970 	{
3971 	  cost = 1;
3972 	  reason = "not in sched group";
3973 	}
3974       else if (modulo_epilogue_p && INSN_EXACT_TICK (insn) == INVALID_TICK)
3975 	{
3976 	  cost = max_insn_queue_index;
3977 	  reason = "not an epilogue insn";
3978 	}
3979       else if (shadows_only_p && !SHADOW_P (insn))
3980 	{
3981 	  cost = 1;
3982 	  reason = "not a shadow";
3983 	}
3984       else if (recog_memoized (insn) < 0)
3985 	{
3986 	  if (!first_cycle_insn_p
3987 	      && (GET_CODE (PATTERN (insn)) == ASM_INPUT
3988 		  || asm_noperands (PATTERN (insn)) >= 0))
3989 	    cost = 1;
3990 	  reason = "asm";
3991 	}
3992       else if (sched_pressure_p)
3993 	cost = 0;
3994       else
3995 	{
3996 	  int delay_cost = 0;
3997 
3998 	  if (delay_htab)
3999 	    {
4000 	      struct delay_pair *delay_entry;
4001 	      delay_entry
4002 		= (struct delay_pair *)htab_find_with_hash (delay_htab, insn,
4003 							    htab_hash_pointer (insn));
4004 	      while (delay_entry && delay_cost == 0)
4005 		{
4006 		  delay_cost = estimate_shadow_tick (delay_entry);
4007 		  if (delay_cost > max_insn_queue_index)
4008 		    delay_cost = max_insn_queue_index;
4009 		  delay_entry = delay_entry->next_same_i1;
4010 		}
4011 	    }
4012 
4013 	  memcpy (temp_state, curr_state, dfa_state_size);
4014 	  cost = state_transition (temp_state, insn);
4015 	  if (cost < 0)
4016 	    cost = 0;
4017 	  else if (cost == 0)
4018 	    cost = 1;
4019 	  if (cost < delay_cost)
4020 	    {
4021 	      cost = delay_cost;
4022 	      reason = "shadow tick";
4023 	    }
4024 	}
4025       if (cost >= 1)
4026 	{
4027 	  ready_remove (&ready, i);
4028 	  queue_insn (insn, cost, reason);
4029 	  goto restart;
4030 	}
4031     }
4032 }
4033 
4034 /* Called when we detect that the schedule is impossible.  We examine the
4035    backtrack queue to find the earliest insn that caused this condition.  */
4036 
4037 static struct haifa_saved_data *
4038 verify_shadows (void)
4039 {
4040   struct haifa_saved_data *save, *earliest_fail = NULL;
4041   for (save = backtrack_queue; save; save = save->next)
4042     {
4043       int t;
4044       struct delay_pair *pair = save->delay_pair;
4045       rtx i1 = pair->i1;
4046 
4047       for (; pair; pair = pair->next_same_i1)
4048 	{
4049 	  rtx i2 = pair->i2;
4050 
4051 	  if (QUEUE_INDEX (i2) == QUEUE_SCHEDULED)
4052 	    continue;
4053 
4054 	  t = INSN_TICK (i1) + pair_delay (pair);
4055 	  if (t < clock_var)
4056 	    {
4057 	      if (sched_verbose >= 2)
4058 		fprintf (sched_dump,
4059 			 ";;\t\tfailed delay requirements for %d/%d (%d->%d)"
4060 			 ", not ready\n",
4061 			 INSN_UID (pair->i1), INSN_UID (pair->i2),
4062 			 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
4063 	      earliest_fail = save;
4064 	      break;
4065 	    }
4066 	  if (QUEUE_INDEX (i2) >= 0)
4067 	    {
4068 	      int queued_for = INSN_TICK (i2);
4069 
4070 	      if (t < queued_for)
4071 		{
4072 		  if (sched_verbose >= 2)
4073 		    fprintf (sched_dump,
4074 			     ";;\t\tfailed delay requirements for %d/%d"
4075 			     " (%d->%d), queued too late\n",
4076 			     INSN_UID (pair->i1), INSN_UID (pair->i2),
4077 			     INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
4078 		  earliest_fail = save;
4079 		  break;
4080 		}
4081 	    }
4082 	}
4083     }
4084 
4085   return earliest_fail;
4086 }
4087 
4088 /* Use forward list scheduling to rearrange insns of block pointed to by
4089    TARGET_BB, possibly bringing insns from subsequent blocks in the same
4090    region.  */
4091 
4092 bool
4093 schedule_block (basic_block *target_bb)
4094 {
4095   int i;
4096   bool success = modulo_ii == 0;
4097   struct sched_block_state ls;
4098   state_t temp_state = NULL;  /* It is used for multipass scheduling.  */
4099   int sort_p, advance, start_clock_var;
4100 
4101   /* Head/tail info for this block.  */
4102   rtx prev_head = current_sched_info->prev_head;
4103   rtx next_tail = current_sched_info->next_tail;
4104   rtx head = NEXT_INSN (prev_head);
4105   rtx tail = PREV_INSN (next_tail);
4106 
4107   /* We used to have code to avoid getting parameters moved from hard
4108      argument registers into pseudos.
4109 
4110      However, it was removed when it proved to be of marginal benefit
4111      and caused problems because schedule_block and compute_forward_dependences
4112      had different notions of what the "head" insn was.  */
4113 
4114   gcc_assert (head != tail || INSN_P (head));
4115 
4116   haifa_recovery_bb_recently_added_p = false;
4117 
4118   backtrack_queue = NULL;
4119 
4120   /* Debug info.  */
4121   if (sched_verbose)
4122     dump_new_block_header (0, *target_bb, head, tail);
4123 
4124   state_reset (curr_state);
4125 
4126   /* Clear the ready list.  */
4127   ready.first = ready.veclen - 1;
4128   ready.n_ready = 0;
4129   ready.n_debug = 0;
4130 
4131   /* It is used for first cycle multipass scheduling.  */
4132   temp_state = alloca (dfa_state_size);
4133 
4134   if (targetm.sched.init)
4135     targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
4136 
4137   /* We start inserting insns after PREV_HEAD.  */
4138   last_scheduled_insn = nonscheduled_insns_begin = prev_head;
4139   last_nondebug_scheduled_insn = NULL_RTX;
4140 
4141   gcc_assert ((NOTE_P (last_scheduled_insn)
4142 	       || DEBUG_INSN_P (last_scheduled_insn))
4143 	      && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
4144 
4145   /* Initialize INSN_QUEUE.  Q_SIZE is the total number of insns in the
4146      queue.  */
4147   q_ptr = 0;
4148   q_size = 0;
4149 
4150   insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
4151   memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
4152 
4153   /* Start just before the beginning of time.  */
4154   clock_var = -1;
4155 
4156   /* We need the queue and ready lists and clock_var to be initialized
4157      in try_ready () (which is called through init_ready_list ()).  */
4158   (*current_sched_info->init_ready_list) ();
4159 
4160   /* The algorithm is O(n^2) in the number of ready insns at any given
4161      time in the worst case.  Before reload we are more likely to have
4162      big lists so truncate them to a reasonable size.  */
4163   if (!reload_completed
4164       && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
4165     {
4166       ready_sort (&ready);
4167 
4168       /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
4169          If there are debug insns, we know they're first.  */
4170       for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
4171 	if (!SCHED_GROUP_P (ready_element (&ready, i)))
4172 	  break;
4173 
4174       if (sched_verbose >= 2)
4175 	{
4176 	  fprintf (sched_dump,
4177 		   ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
4178 	  fprintf (sched_dump,
4179 		   ";;\t\t before reload => truncated to %d insns\n", i);
4180 	}
4181 
4182       /* Delay all insns past it for 1 cycle.  If the debug counter is
4183 	 activated, make an exception for the insn right after
4184 	 nonscheduled_insns_begin.  */
4185       {
4186 	rtx skip_insn;
4187 
4188 	if (!dbg_cnt (sched_insn))
4189 	  skip_insn = next_nonnote_insn (nonscheduled_insns_begin);
4190 	else
4191 	  skip_insn = NULL_RTX;
4192 
4193 	while (i < ready.n_ready)
4194 	  {
4195 	    rtx insn;
4196 
4197 	    insn = ready_remove (&ready, i);
4198 
4199 	    if (insn != skip_insn)
4200 	      queue_insn (insn, 1, "list truncated");
4201 	  }
4202 	if (skip_insn)
4203 	  ready_add (&ready, skip_insn, true);
4204       }
4205     }
4206 
4207   /* Now we can restore basic block notes and maintain precise cfg.  */
4208   restore_bb_notes (*target_bb);
4209 
4210   last_clock_var = -1;
4211 
4212   advance = 0;
4213 
4214   gcc_assert (VEC_length (rtx, scheduled_insns) == 0);
4215   sort_p = TRUE;
4216   must_backtrack = false;
4217   modulo_insns_scheduled = 0;
4218 
4219   ls.modulo_epilogue = false;
4220 
4221   /* Loop until all the insns in BB are scheduled.  */
4222   while ((*current_sched_info->schedule_more_p) ())
4223     {
4224       do
4225 	{
4226 	  start_clock_var = clock_var;
4227 
4228 	  clock_var++;
4229 
4230 	  advance_one_cycle ();
4231 
4232 	  /* Add to the ready list all pending insns that can be issued now.
4233 	     If there are no ready insns, increment clock until one
4234 	     is ready and add all pending insns at that point to the ready
4235 	     list.  */
4236 	  queue_to_ready (&ready);
4237 
4238 	  gcc_assert (ready.n_ready);
4239 
4240 	  if (sched_verbose >= 2)
4241 	    {
4242 	      fprintf (sched_dump, ";;\t\tReady list after queue_to_ready:  ");
4243 	      debug_ready_list (&ready);
4244 	    }
4245 	  advance -= clock_var - start_clock_var;
4246 	}
4247       while (advance > 0);
4248 
4249       if (ls.modulo_epilogue)
4250 	{
4251 	  int stage = clock_var / modulo_ii;
4252 	  if (stage > modulo_last_stage * 2 + 2)
4253 	    {
4254 	      if (sched_verbose >= 2)
4255 		fprintf (sched_dump,
4256 			 ";;\t\tmodulo schedule succeeded at II %d\n",
4257 			 modulo_ii);
4258 	      success = true;
4259 	      goto end_schedule;
4260 	    }
4261 	}
4262       else if (modulo_ii > 0)
4263 	{
4264 	  int stage = clock_var / modulo_ii;
4265 	  if (stage > modulo_max_stages)
4266 	    {
4267 	      if (sched_verbose >= 2)
4268 		fprintf (sched_dump,
4269 			 ";;\t\tfailing schedule due to excessive stages\n");
4270 	      goto end_schedule;
4271 	    }
4272 	  if (modulo_n_insns == modulo_insns_scheduled
4273 	      && stage > modulo_last_stage)
4274 	    {
4275 	      if (sched_verbose >= 2)
4276 		fprintf (sched_dump,
4277 			 ";;\t\tfound kernel after %d stages, II %d\n",
4278 			 stage, modulo_ii);
4279 	      ls.modulo_epilogue = true;
4280 	    }
4281 	}
4282 
4283       prune_ready_list (temp_state, true, false, ls.modulo_epilogue);
4284       if (ready.n_ready == 0)
4285 	continue;
4286       if (must_backtrack)
4287 	goto do_backtrack;
4288 
4289       ls.first_cycle_insn_p = true;
4290       ls.shadows_only_p = false;
4291       cycle_issued_insns = 0;
4292       ls.can_issue_more = issue_rate;
4293       for (;;)
4294 	{
4295 	  rtx insn;
4296 	  int cost;
4297 	  bool asm_p;
4298 
4299 	  if (sort_p && ready.n_ready > 0)
4300 	    {
4301 	      /* Sort the ready list based on priority.  This must be
4302 		 done every iteration through the loop, as schedule_insn
4303 		 may have readied additional insns that will not be
4304 		 sorted correctly.  */
4305 	      ready_sort (&ready);
4306 
4307 	      if (sched_verbose >= 2)
4308 		{
4309 		  fprintf (sched_dump, ";;\t\tReady list after ready_sort:  ");
4310 		  debug_ready_list (&ready);
4311 		}
4312 	    }
4313 
4314 	  /* We don't want md sched reorder to even see debug insns, so put
4315 	     them out right away.  */
4316 	  if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
4317 	      && (*current_sched_info->schedule_more_p) ())
4318 	    {
4319 	      while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
4320 		{
4321 		  rtx insn = ready_remove_first (&ready);
4322 		  gcc_assert (DEBUG_INSN_P (insn));
4323 		  (*current_sched_info->begin_schedule_ready) (insn);
4324 		  VEC_safe_push (rtx, heap, scheduled_insns, insn);
4325 		  last_scheduled_insn = insn;
4326 		  advance = schedule_insn (insn);
4327 		  gcc_assert (advance == 0);
4328 		  if (ready.n_ready > 0)
4329 		    ready_sort (&ready);
4330 		}
4331 	    }
4332 
4333 	  if (ls.first_cycle_insn_p && !ready.n_ready)
4334 	    break;
4335 
4336 	resume_after_backtrack:
4337 	  /* Allow the target to reorder the list, typically for
4338 	     better instruction bundling.  */
4339 	  if (sort_p
4340 	      && (ready.n_ready == 0
4341 		  || !SCHED_GROUP_P (ready_element (&ready, 0))))
4342 	    {
4343 	      if (ls.first_cycle_insn_p && targetm.sched.reorder)
4344 		ls.can_issue_more
4345 		  = targetm.sched.reorder (sched_dump, sched_verbose,
4346 					   ready_lastpos (&ready),
4347 					   &ready.n_ready, clock_var);
4348 	      else if (!ls.first_cycle_insn_p && targetm.sched.reorder2)
4349 		ls.can_issue_more
4350 		  = targetm.sched.reorder2 (sched_dump, sched_verbose,
4351 					    ready.n_ready
4352 					    ? ready_lastpos (&ready) : NULL,
4353 					    &ready.n_ready, clock_var);
4354 	    }
4355 
4356 	restart_choose_ready:
4357 	  if (sched_verbose >= 2)
4358 	    {
4359 	      fprintf (sched_dump, ";;\tReady list (t = %3d):  ",
4360 		       clock_var);
4361 	      debug_ready_list (&ready);
4362 	      if (sched_pressure_p)
4363 		print_curr_reg_pressure ();
4364 	    }
4365 
4366 	  if (ready.n_ready == 0
4367 	      && ls.can_issue_more
4368 	      && reload_completed)
4369 	    {
4370 	      /* Allow scheduling insns directly from the queue in case
4371 		 there's nothing better to do (ready list is empty) but
4372 		 there are still vacant dispatch slots in the current cycle.  */
4373 	      if (sched_verbose >= 6)
4374 		fprintf (sched_dump, ";;\t\tSecond chance\n");
4375 	      memcpy (temp_state, curr_state, dfa_state_size);
4376 	      if (early_queue_to_ready (temp_state, &ready))
4377 		ready_sort (&ready);
4378 	    }
4379 
4380 	  if (ready.n_ready == 0
4381 	      || !ls.can_issue_more
4382 	      || state_dead_lock_p (curr_state)
4383 	      || !(*current_sched_info->schedule_more_p) ())
4384 	    break;
4385 
4386 	  /* Select and remove the insn from the ready list.  */
4387 	  if (sort_p)
4388 	    {
4389 	      int res;
4390 
4391 	      insn = NULL_RTX;
4392 	      res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);
4393 
4394 	      if (res < 0)
4395 		/* Finish cycle.  */
4396 		break;
4397 	      if (res > 0)
4398 		goto restart_choose_ready;
4399 
4400 	      gcc_assert (insn != NULL_RTX);
4401 	    }
4402 	  else
4403 	    insn = ready_remove_first (&ready);
4404 
4405 	  if (sched_pressure_p && INSN_TICK (insn) > clock_var)
4406 	    {
4407 	      ready_add (&ready, insn, true);
4408 	      advance = 1;
4409 	      break;
4410 	    }
4411 
4412 	  if (targetm.sched.dfa_new_cycle
4413 	      && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
4414 					      insn, last_clock_var,
4415 					      clock_var, &sort_p))
4416 	    /* SORT_P is used by the target to override sorting
4417 	       of the ready list.  This is needed when the target
4418 	       has modified its internal structures expecting that
4419 	       the insn will be issued next.  As we need the insn
4420 	       to have the highest priority (so it will be returned by
4421 	       the ready_remove_first call above), we invoke
4422 	       ready_add (&ready, insn, true).
4423 	       But there is still one issue: INSN can later be
4424 	       discarded by the scheduler's front end through
4425 	       current_sched_info->can_schedule_ready_p, and hence
4426 	       won't be issued next.  */
4427 	    {
4428 	      ready_add (&ready, insn, true);
4429               break;
4430 	    }
4431 
4432 	  sort_p = TRUE;
4433 
4434 	  if (current_sched_info->can_schedule_ready_p
4435 	      && ! (*current_sched_info->can_schedule_ready_p) (insn))
4436 	    /* We normally get here only if we don't want to move
4437 	       the insn from the split block.  */
4438 	    {
4439 	      TODO_SPEC (insn) = HARD_DEP;
4440 	      goto restart_choose_ready;
4441 	    }
4442 
4443 	  if (delay_htab)
4444 	    {
4445 	      /* If this insn is the first part of a delay-slot pair, record a
4446 		 backtrack point.  */
4447 	      struct delay_pair *delay_entry;
4448 	      delay_entry
4449 		= (struct delay_pair *)htab_find_with_hash (delay_htab, insn,
4450 							    htab_hash_pointer (insn));
4451 	      if (delay_entry)
4452 		{
4453 		  save_backtrack_point (delay_entry, ls);
4454 		  if (sched_verbose >= 2)
4455 		    fprintf (sched_dump, ";;\t\tsaving backtrack point\n");
4456 		}
4457 	    }
4458 
4459 	  /* DECISION is made.  */
4460 
4461 	  if (modulo_ii > 0 && INSN_UID (insn) < modulo_iter0_max_uid)
4462 	    {
4463 	      modulo_insns_scheduled++;
4464 	      modulo_last_stage = clock_var / modulo_ii;
4465 	    }
4466           if (TODO_SPEC (insn) & SPECULATIVE)
4467             generate_recovery_code (insn);
4468 
4469 	  if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
4470 	    targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
4471 
4472 	  /* Update counters, etc in the scheduler's front end.  */
4473 	  (*current_sched_info->begin_schedule_ready) (insn);
4474 	  VEC_safe_push (rtx, heap, scheduled_insns, insn);
4475 	  gcc_assert (NONDEBUG_INSN_P (insn));
4476 	  last_nondebug_scheduled_insn = last_scheduled_insn = insn;
4477 
4478 	  if (recog_memoized (insn) >= 0)
4479 	    {
4480 	      memcpy (temp_state, curr_state, dfa_state_size);
4481 	      cost = state_transition (curr_state, insn);
4482 	      if (!sched_pressure_p)
4483 		gcc_assert (cost < 0);
4484 	      if (memcmp (temp_state, curr_state, dfa_state_size) != 0)
4485 		cycle_issued_insns++;
4486 	      asm_p = false;
4487 	    }
4488 	  else
4489 	    asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
4490 		     || asm_noperands (PATTERN (insn)) >= 0);
4491 
4492 	  if (targetm.sched.variable_issue)
4493 	    ls.can_issue_more =
4494 	      targetm.sched.variable_issue (sched_dump, sched_verbose,
4495 					    insn, ls.can_issue_more);
4496 	  /* A naked CLOBBER or USE generates no instruction, so do
4497 	     not count them against the issue rate.  */
4498 	  else if (GET_CODE (PATTERN (insn)) != USE
4499 		   && GET_CODE (PATTERN (insn)) != CLOBBER)
4500 	    ls.can_issue_more--;
4501 	  advance = schedule_insn (insn);
4502 
4503 	  if (SHADOW_P (insn))
4504 	    ls.shadows_only_p = true;
4505 
4506 	  /* After issuing an asm insn we should start a new cycle.  */
4507 	  if (advance == 0 && asm_p)
4508 	    advance = 1;
4509 
4510 	  if (must_backtrack)
4511 	    break;
4512 
4513 	  if (advance != 0)
4514 	    break;
4515 
4516 	  ls.first_cycle_insn_p = false;
4517 	  if (ready.n_ready > 0)
4518 	    prune_ready_list (temp_state, false, ls.shadows_only_p,
4519 			      ls.modulo_epilogue);
4520 	}
4521 
4522     do_backtrack:
4523       if (!must_backtrack)
4524 	for (i = 0; i < ready.n_ready; i++)
4525 	  {
4526 	    rtx insn = ready_element (&ready, i);
4527 	    if (INSN_EXACT_TICK (insn) == clock_var)
4528 	      {
4529 		must_backtrack = true;
4530 		clock_var++;
4531 		break;
4532 	      }
4533 	  }
4534       if (must_backtrack && modulo_ii > 0)
4535 	{
4536 	  if (modulo_backtracks_left == 0)
4537 	    goto end_schedule;
4538 	  modulo_backtracks_left--;
4539 	}
4540       while (must_backtrack)
4541 	{
4542 	  struct haifa_saved_data *failed;
4543 	  rtx failed_insn;
4544 
4545 	  must_backtrack = false;
4546 	  failed = verify_shadows ();
4547 	  gcc_assert (failed);
4548 
4549 	  failed_insn = failed->delay_pair->i1;
4550 	  toggle_cancelled_flags (false);
4551 	  unschedule_insns_until (failed_insn);
4552 	  while (failed != backtrack_queue)
4553 	    free_topmost_backtrack_point (true);
4554 	  restore_last_backtrack_point (&ls);
4555 	  if (sched_verbose >= 2)
4556 	    fprintf (sched_dump, ";;\t\trewind to cycle %d\n", clock_var);
4557 	  /* Delay by at least a cycle.  This could cause additional
4558 	     backtracking.  */
4559 	  queue_insn (failed_insn, 1, "backtracked");
4560 	  advance = 0;
4561 	  if (must_backtrack)
4562 	    continue;
4563 	  if (ready.n_ready > 0)
4564 	    goto resume_after_backtrack;
4565 	  else
4566 	    {
4567 	      if (clock_var == 0 && ls.first_cycle_insn_p)
4568 		goto end_schedule;
4569 	      advance = 1;
4570 	      break;
4571 	    }
4572 	}
4573     }
4574   if (ls.modulo_epilogue)
4575     success = true;
4576  end_schedule:
4577   if (modulo_ii > 0)
4578     {
4579       /* Once again, debug insn suckiness: they can be on the ready list
4580 	 even if they have unresolved dependencies.  To make our view
4581 	 of the world consistent, remove such "ready" insns.  */
4582     restart_debug_insn_loop:
4583       for (i = ready.n_ready - 1; i >= 0; i--)
4584 	{
4585 	  rtx x;
4586 
4587 	  x = ready_element (&ready, i);
4588 	  if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (x)) != NULL
4589 	      || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (x)) != NULL)
4590 	    {
4591 	      ready_remove (&ready, i);
4592 	      goto restart_debug_insn_loop;
4593 	    }
4594 	}
4595       for (i = ready.n_ready - 1; i >= 0; i--)
4596 	{
4597 	  rtx x;
4598 
4599 	  x = ready_element (&ready, i);
4600 	  resolve_dependencies (x);
4601 	}
4602       for (i = 0; i <= max_insn_queue_index; i++)
4603 	{
4604 	  rtx link;
4605 	  while ((link = insn_queue[i]) != NULL)
4606 	    {
4607 	      rtx x = XEXP (link, 0);
4608 	      insn_queue[i] = XEXP (link, 1);
4609 	      QUEUE_INDEX (x) = QUEUE_NOWHERE;
4610 	      free_INSN_LIST_node (link);
4611 	      resolve_dependencies (x);
4612 	    }
4613 	}
4614     }
4615 
4616   /* Debug info.  */
4617   if (sched_verbose)
4618     {
4619       fprintf (sched_dump, ";;\tReady list (final):  ");
4620       debug_ready_list (&ready);
4621     }
4622 
4623   if (modulo_ii == 0 && current_sched_info->queue_must_finish_empty)
4624     /* Sanity check -- queue must be empty now.  Meaningless if region has
4625        multiple bbs.  */
4626     gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
4627   else if (modulo_ii == 0)
4628     {
4629       /* We must maintain QUEUE_INDEX between blocks in the region.  */
4630       for (i = ready.n_ready - 1; i >= 0; i--)
4631 	{
4632 	  rtx x;
4633 
4634 	  x = ready_element (&ready, i);
4635 	  QUEUE_INDEX (x) = QUEUE_NOWHERE;
4636 	  TODO_SPEC (x) = HARD_DEP;
4637 	}
4638 
4639       if (q_size)
4640 	for (i = 0; i <= max_insn_queue_index; i++)
4641 	  {
4642 	    rtx link;
4643 	    for (link = insn_queue[i]; link; link = XEXP (link, 1))
4644 	      {
4645 		rtx x;
4646 
4647 		x = XEXP (link, 0);
4648 		QUEUE_INDEX (x) = QUEUE_NOWHERE;
4649 		TODO_SPEC (x) = HARD_DEP;
4650 	      }
4651 	    free_INSN_LIST_list (&insn_queue[i]);
4652 	  }
4653     }
4654 
4655   if (success)
4656     {
4657       commit_schedule (prev_head, tail, target_bb);
4658       if (sched_verbose)
4659 	fprintf (sched_dump, ";;   total time = %d\n", clock_var);
4660     }
4661   else
4662     last_scheduled_insn = tail;
4663 
4664   VEC_truncate (rtx, scheduled_insns, 0);
4665 
4666   if (!current_sched_info->queue_must_finish_empty
4667       || haifa_recovery_bb_recently_added_p)
4668     {
4669       /* INSN_TICK (the minimum clock tick at which an insn becomes
4670          ready) may not be correct for insns in the subsequent
4671          blocks of the region.  We should use a correct value of
4672          `clock_var' or modify INSN_TICK.  It is better to keep
4673          the clock_var value equal to 0 at the start of a basic block.
4674          Therefore we modify INSN_TICK here.  */
4675       fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
4676     }
4677 
4678   if (targetm.sched.finish)
4679     {
4680       targetm.sched.finish (sched_dump, sched_verbose);
4681       /* The target might have added some instructions to the scheduled
4682 	 block in its md_finish () hook.  These new insns don't have any
4683 	 data initialized, and to identify them we extend h_i_d so that
4684 	 they'll get zero luids.  */
4685       sched_extend_luids ();
4686     }
4687 
4688   if (sched_verbose)
4689     fprintf (sched_dump, ";;   new head = %d\n;;   new tail = %d\n\n",
4690 	     INSN_UID (head), INSN_UID (tail));
4691 
4692   /* Update head/tail boundaries.  */
4693   head = NEXT_INSN (prev_head);
4694   tail = last_scheduled_insn;
4695 
4696   head = restore_other_notes (head, NULL);
4697 
4698   current_sched_info->head = head;
4699   current_sched_info->tail = tail;
4700 
4701   free_backtrack_queue ();
4702 
4703   return success;
4704 }
4705 
4706 /* set_priorities: compute the priority of each insn in the block.  */
4707 
4708 int
4709 set_priorities (rtx head, rtx tail)
4710 {
4711   rtx insn;
4712   int n_insn;
4713   int sched_max_insns_priority =
4714 	current_sched_info->sched_max_insns_priority;
4715   rtx prev_head;
4716 
4717   if (head == tail && ! INSN_P (head))
4718     gcc_unreachable ();
4719 
4720   n_insn = 0;
4721 
4722   prev_head = PREV_INSN (head);
4723   for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
4724     {
4725       if (!INSN_P (insn))
4726 	continue;
4727 
4728       n_insn++;
4729       (void) priority (insn);
4730 
4731       gcc_assert (INSN_PRIORITY_KNOWN (insn));
4732 
4733       sched_max_insns_priority = MAX (sched_max_insns_priority,
4734 				      INSN_PRIORITY (insn));
4735     }
4736 
4737   current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
4738 
4739   return n_insn;
4740 }
4741 
4742 /* Set dump and sched_verbose for the desired debugging output.  If no
4743    dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
4744    For -fsched-verbose=N, N>=10, print everything to stderr.  */
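/* For example (behavior implied by the code below): with -fsched-verbose=3
   and a dump file, output goes to the dump file; with -fsched-verbose=10 or
   higher, or with no dump file at all, output goes to stderr.  */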
4745 void
4746 setup_sched_dump (void)
4747 {
4748   sched_verbose = sched_verbose_param;
4749   if (sched_verbose_param == 0 && dump_file)
4750     sched_verbose = 1;
4751   sched_dump = ((sched_verbose_param >= 10 || !dump_file)
4752 		? stderr : dump_file);
4753 }
4754 
4755 /* Initialize some global state for the scheduler.  This function works
4756    with the common data shared between all the schedulers.  It is called
4757    from the scheduler specific initialization routine.  */
4758 
4759 void
4760 sched_init (void)
4761 {
4762   /* Disable speculative loads if cc0 is defined, as they are unsafe in its presence.  */
4763 #ifdef HAVE_cc0
4764   flag_schedule_speculative_load = 0;
4765 #endif
4766 
4767   if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
4768     targetm.sched.dispatch_do (NULL_RTX, DISPATCH_INIT);
4769 
4770   sched_pressure_p = (flag_sched_pressure && ! reload_completed
4771 		      && common_sched_info->sched_pass_id == SCHED_RGN_PASS);
4772 
4773   if (sched_pressure_p)
4774     ira_setup_eliminable_regset ();
4775 
4776   /* Initialize SPEC_INFO.  */
4777   if (targetm.sched.set_sched_flags)
4778     {
4779       spec_info = &spec_info_var;
4780       targetm.sched.set_sched_flags (spec_info);
4781 
4782       if (spec_info->mask != 0)
4783         {
4784           spec_info->data_weakness_cutoff =
4785             (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
4786           spec_info->control_weakness_cutoff =
4787             (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
4788              * REG_BR_PROB_BASE) / 100;
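          /* As a worked example (assuming, purely for illustration, a
             PARAM_SCHED_SPEC_PROB_CUTOFF value of 40): the data cutoff
             becomes 40 * MAX_DEP_WEAK / 100 and the control cutoff
             40 * REG_BR_PROB_BASE / 100, i.e. both sit at 40% of their
             respective scales.  */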
4789         }
4790       else
4791 	/* So we won't read anything accidentally.  */
4792 	spec_info = NULL;
4793 
4794     }
4795   else
4796     /* So we won't read anything accidentally.  */
4797     spec_info = 0;
4798 
4799   /* Initialize issue_rate.  */
4800   if (targetm.sched.issue_rate)
4801     issue_rate = targetm.sched.issue_rate ();
4802   else
4803     issue_rate = 1;
4804 
4805   if (cached_issue_rate != issue_rate)
4806     {
4807       cached_issue_rate = issue_rate;
4808       /* To invalidate max_lookahead_tries:  */
4809       cached_first_cycle_multipass_dfa_lookahead = 0;
4810     }
4811 
4812   if (targetm.sched.first_cycle_multipass_dfa_lookahead)
4813     dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
4814   else
4815     dfa_lookahead = 0;
4816 
4817   if (targetm.sched.init_dfa_pre_cycle_insn)
4818     targetm.sched.init_dfa_pre_cycle_insn ();
4819 
4820   if (targetm.sched.init_dfa_post_cycle_insn)
4821     targetm.sched.init_dfa_post_cycle_insn ();
4822 
4823   dfa_start ();
4824   dfa_state_size = state_size ();
4825 
4826   init_alias_analysis ();
4827 
4828   if (!sched_no_dce)
4829     df_set_flags (DF_LR_RUN_DCE);
4830   df_note_add_problem ();
4831 
4832   /* More problems needed for interloop dep calculation in SMS.  */
4833   if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
4834     {
4835       df_rd_add_problem ();
4836       df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
4837     }
4838 
4839   df_analyze ();
4840 
4841   /* Do not run DCE after reload, as this can kill nops inserted
4842      by bundling.  */
4843   if (reload_completed)
4844     df_clear_flags (DF_LR_RUN_DCE);
4845 
4846   regstat_compute_calls_crossed ();
4847 
4848   if (targetm.sched.init_global)
4849     targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
4850 
4851   if (sched_pressure_p)
4852     {
4853       int i, max_regno = max_reg_num ();
4854 
4855       if (sched_dump != NULL)
4856 	/* We need info about pseudos for rtl dumps about pseudo
4857 	   classes and costs.  */
4858 	regstat_init_n_sets_and_refs ();
4859       ira_set_pseudo_classes (sched_verbose ? sched_dump : NULL);
4860       sched_regno_pressure_class
4861 	= (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
4862       for (i = 0; i < max_regno; i++)
4863 	sched_regno_pressure_class[i]
4864 	  = (i < FIRST_PSEUDO_REGISTER
4865 	     ? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
4866 	     : ira_pressure_class_translate[reg_allocno_class (i)]);
4867       curr_reg_live = BITMAP_ALLOC (NULL);
4868       saved_reg_live = BITMAP_ALLOC (NULL);
4869       region_ref_regs = BITMAP_ALLOC (NULL);
4870     }
4871 
4872   curr_state = xmalloc (dfa_state_size);
4873 }
4874 
4875 static void haifa_init_only_bb (basic_block, basic_block);
4876 
4877 /* Initialize data structures specific to the Haifa scheduler.  */
4878 void
4879 haifa_sched_init (void)
4880 {
4881   setup_sched_dump ();
4882   sched_init ();
4883 
4884   scheduled_insns = VEC_alloc (rtx, heap, 0);
4885 
4886   if (spec_info != NULL)
4887     {
4888       sched_deps_info->use_deps_list = 1;
4889       sched_deps_info->generate_spec_deps = 1;
4890     }
4891 
4892   /* Initialize luids, dependency caches, target and h_i_d for the
4893      whole function.  */
4894   {
4895     bb_vec_t bbs = VEC_alloc (basic_block, heap, n_basic_blocks);
4896     basic_block bb;
4897 
4898     sched_init_bbs ();
4899 
4900     FOR_EACH_BB (bb)
4901       VEC_quick_push (basic_block, bbs, bb);
4902     sched_init_luids (bbs);
4903     sched_deps_init (true);
4904     sched_extend_target ();
4905     haifa_init_h_i_d (bbs);
4906 
4907     VEC_free (basic_block, heap, bbs);
4908   }
4909 
4910   sched_init_only_bb = haifa_init_only_bb;
4911   sched_split_block = sched_split_block_1;
4912   sched_create_empty_bb = sched_create_empty_bb_1;
4913   haifa_recovery_bb_ever_added_p = false;
4914 
4915   nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
4916   before_recovery = 0;
4917   after_recovery = 0;
4918 
4919   modulo_ii = 0;
4920 }
4921 
4922 /* Finish work with the data specific to the Haifa scheduler.  */
4923 void
4924 haifa_sched_finish (void)
4925 {
4926   sched_create_empty_bb = NULL;
4927   sched_split_block = NULL;
4928   sched_init_only_bb = NULL;
4929 
4930   if (spec_info && spec_info->dump)
4931     {
4932       char c = reload_completed ? 'a' : 'b';
4933 
4934       fprintf (spec_info->dump,
4935 	       ";; %s:\n", current_function_name ());
4936 
4937       fprintf (spec_info->dump,
4938                ";; Procedure %cr-begin-data-spec motions == %d\n",
4939                c, nr_begin_data);
4940       fprintf (spec_info->dump,
4941                ";; Procedure %cr-be-in-data-spec motions == %d\n",
4942                c, nr_be_in_data);
4943       fprintf (spec_info->dump,
4944                ";; Procedure %cr-begin-control-spec motions == %d\n",
4945                c, nr_begin_control);
4946       fprintf (spec_info->dump,
4947                ";; Procedure %cr-be-in-control-spec motions == %d\n",
4948                c, nr_be_in_control);
4949     }
4950 
4951   VEC_free (rtx, heap, scheduled_insns);
4952 
4953   /* Finalize h_i_d, dependency caches, and luids for the whole
4954      function.  Target will be finalized in md_global_finish ().  */
4955   sched_deps_finish ();
4956   sched_finish_luids ();
4957   current_sched_info = NULL;
4958   sched_finish ();
4959 }
4960 
4961 /* Free global data used during insn scheduling.  This function works with
4962    the common data shared between the schedulers.  */
4963 
4964 void
4965 sched_finish (void)
4966 {
4967   haifa_finish_h_i_d ();
4968   if (sched_pressure_p)
4969     {
4970       if (regstat_n_sets_and_refs != NULL)
4971 	regstat_free_n_sets_and_refs ();
4972       free (sched_regno_pressure_class);
4973       BITMAP_FREE (region_ref_regs);
4974       BITMAP_FREE (saved_reg_live);
4975       BITMAP_FREE (curr_reg_live);
4976     }
4977   free (curr_state);
4978 
4979   if (targetm.sched.finish_global)
4980     targetm.sched.finish_global (sched_dump, sched_verbose);
4981 
4982   end_alias_analysis ();
4983 
4984   regstat_free_calls_crossed ();
4985 
4986   dfa_finish ();
4987 }
4988 
4989 /* Free all delay_pair structures that were recorded.  */
4990 void
4991 free_delay_pairs (void)
4992 {
4993   if (delay_htab)
4994     {
4995       htab_empty (delay_htab);
4996       htab_empty (delay_htab_i2);
4997     }
4998 }
4999 
5000 /* Fix INSN_TICKs of the instructions in the current block as well as
5001    INSN_TICKs of their dependents.
5002    HEAD and TAIL are the begin and the end of the current scheduled block.  */
5003 static void
5004 fix_inter_tick (rtx head, rtx tail)
5005 {
5006   /* Set of instructions with corrected INSN_TICK.  */
5007   bitmap_head processed;
5008   /* ??? It is doubtful whether we should assume that a cycle advance
5009      happens on basic block boundaries.  Basically, insns that are
5010      unconditionally ready at the start of the block are preferable to
5011      those which have a one-cycle dependency on an insn from the previous block.  */
5012   int next_clock = clock_var + 1;
5013 
5014   bitmap_initialize (&processed, 0);
5015 
5016   /* Iterate over the scheduled instructions and fix their INSN_TICKs and
5017      the INSN_TICKs of dependent instructions, so that INSN_TICKs are
5018      consistent across different blocks.  */
5019   for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
5020     {
5021       if (INSN_P (head))
5022 	{
5023 	  int tick;
5024 	  sd_iterator_def sd_it;
5025 	  dep_t dep;
5026 
5027 	  tick = INSN_TICK (head);
5028 	  gcc_assert (tick >= MIN_TICK);
5029 
5030 	  /* Fix INSN_TICK of instruction from just scheduled block.  */
5031 	  if (bitmap_set_bit (&processed, INSN_LUID (head)))
5032 	    {
5033 	      tick -= next_clock;
5034 
5035 	      if (tick < MIN_TICK)
5036 		tick = MIN_TICK;
5037 
5038 	      INSN_TICK (head) = tick;
5039 	    }
5040 
5041 	  if (DEBUG_INSN_P (head))
5042 	    continue;
5043 
5044 	  FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
5045 	    {
5046 	      rtx next;
5047 
5048 	      next = DEP_CON (dep);
5049 	      tick = INSN_TICK (next);
5050 
5051 	      if (tick != INVALID_TICK
5052 		  /* If NEXT has its INSN_TICK calculated, fix it.
5053 		     If not, it will be properly calculated from
5054 		     scratch later in fix_tick_ready.  */
5055 		  && bitmap_set_bit (&processed, INSN_LUID (next)))
5056 		{
5057 		  tick -= next_clock;
5058 
5059 		  if (tick < MIN_TICK)
5060 		    tick = MIN_TICK;
5061 
5062 		  if (tick > INTER_TICK (next))
5063 		    INTER_TICK (next) = tick;
5064 		  else
5065 		    tick = INTER_TICK (next);
5066 
5067 		  INSN_TICK (next) = tick;
5068 		}
5069 	    }
5070 	}
5071     }
5072   bitmap_clear (&processed);
5073 }
5074 
5075 /* Check if NEXT is ready to be added to the ready or queue list.
5076    If "yes", add it to the proper list.
5077    Returns:
5078       -1 - is not ready yet,
5079        0 - added to the ready list,
5080    0 < N - queued for N cycles.  */
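/* For instance (illustrative): after resolving a dependence, a caller can
   simply invoke try_ready (next) and ignore the return value; as a side
   effect NEXT has already been added to the ready list (0), queued for
   some cycles (N > 0), or left off both lists because it is not ready
   yet (-1).  */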
5081 int
5082 try_ready (rtx next)
5083 {
5084   ds_t old_ts, new_ts;
5085 
5086   old_ts = TODO_SPEC (next);
5087 
5088   gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP | DEP_CONTROL))
5089 	      && ((old_ts & HARD_DEP)
5090 		  || (old_ts & SPECULATIVE)
5091 		  || (old_ts & DEP_CONTROL)));
5092 
5093   new_ts = recompute_todo_spec (next);
5094 
5095   if (new_ts & HARD_DEP)
5096     gcc_assert (new_ts == old_ts
5097 		&& QUEUE_INDEX (next) == QUEUE_NOWHERE);
5098   else if (current_sched_info->new_ready)
5099     new_ts = current_sched_info->new_ready (next, new_ts);
5100 
5101   /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then the insn
5102      might have either its original pattern or a changed (speculative)
5103      one.  This is due to changing ebbs in region scheduling.
5104      * But if (old_ts & SPECULATIVE), then we are pretty sure that the
5105      insn has a speculative pattern.
5106 
5107      We can't assert (!(new_ts & HARD_DEP) || new_ts == old_ts) here because
5108      a control-speculative NEXT could have been discarded by sched-rgn.c
5109      (the same case as when discarded by can_schedule_ready_p ()).  */
5110 
5111   if ((new_ts & SPECULATIVE)
5112       /* If (old_ts == new_ts), then (old_ts & SPECULATIVE) and we don't
5113 	 need to change anything.  */
5114       && new_ts != old_ts)
5115     {
5116       int res;
5117       rtx new_pat;
5118 
5119       gcc_assert ((new_ts & SPECULATIVE) && !(new_ts & ~SPECULATIVE));
5120 
5121       res = haifa_speculate_insn (next, new_ts, &new_pat);
5122 
5123       switch (res)
5124 	{
5125 	case -1:
5126 	  /* It would be nice to change DEP_STATUS of all dependences,
5127 	     which have ((DEP_STATUS & SPECULATIVE) == new_ts) to HARD_DEP,
5128 	     so we won't reanalyze anything.  */
5129 	  new_ts = HARD_DEP;
5130 	  break;
5131 
5132 	case 0:
5133 	  /* We follow the rule that every speculative insn
5134 	     has a non-null ORIG_PAT.  */
5135 	  if (!ORIG_PAT (next))
5136 	    ORIG_PAT (next) = PATTERN (next);
5137 	  break;
5138 
5139 	case 1:
5140 	  if (!ORIG_PAT (next))
5141 	    /* If we are going to overwrite the original pattern of the insn,
5142 	       save it.  */
5143 	    ORIG_PAT (next) = PATTERN (next);
5144 
5145 	  res = haifa_change_pattern (next, new_pat);
5146 	  gcc_assert (res);
5147 	  break;
5148 
5149 	default:
5150 	  gcc_unreachable ();
5151 	}
5152     }
5153 
5154   /* We need to restore the pattern only if (new_ts == 0), because otherwise
5155      it is either correct (new_ts & SPECULATIVE),
5156      or we simply don't care (new_ts & HARD_DEP).  */
5157 
5158   gcc_assert (!ORIG_PAT (next)
5159 	      || !IS_SPECULATION_BRANCHY_CHECK_P (next));
5160 
5161   TODO_SPEC (next) = new_ts;
5162 
5163   if (new_ts & HARD_DEP)
5164     {
5165       /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
5166 	 control-speculative NEXT could have been discarded by sched-rgn.c
5167 	 (the same case as when discarded by can_schedule_ready_p ()).  */
5168       /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
5169 
5170       change_queue_index (next, QUEUE_NOWHERE);
5171 
5172       return -1;
5173     }
5174   else if (!(new_ts & BEGIN_SPEC)
5175 	   && ORIG_PAT (next) && PREDICATED_PAT (next) == NULL_RTX
5176 	   && !IS_SPECULATION_CHECK_P (next))
5177     /* We should change the pattern of every previously speculative
5178        instruction - and we determine whether NEXT was speculative by using
5179        the ORIG_PAT field.  There is one exception: speculation checks have
5180        ORIG_PAT too, so skip them.  */
5181     {
5182       bool success = haifa_change_pattern (next, ORIG_PAT (next));
5183       gcc_assert (success);
5184       ORIG_PAT (next) = 0;
5185     }
5186 
5187   if (sched_verbose >= 2)
5188     {
5189       fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
5190                (*current_sched_info->print_insn) (next, 0));
5191 
5192       if (spec_info && spec_info->dump)
5193         {
5194           if (new_ts & BEGIN_DATA)
5195             fprintf (spec_info->dump, "; data-spec;");
5196           if (new_ts & BEGIN_CONTROL)
5197             fprintf (spec_info->dump, "; control-spec;");
5198           if (new_ts & BE_IN_CONTROL)
5199             fprintf (spec_info->dump, "; in-control-spec;");
5200         }
5201       if (TODO_SPEC (next) & DEP_CONTROL)
5202 	fprintf (sched_dump, " predicated");
5203       fprintf (sched_dump, "\n");
5204     }
5205 
5206   adjust_priority (next);
5207 
5208   return fix_tick_ready (next);
5209 }
5210 
5211 /* Calculate INSN_TICK of NEXT and add it to either the ready or the queue list.  */
5212 static int
5213 fix_tick_ready (rtx next)
5214 {
5215   int tick, delay;
5216 
5217   if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
5218     {
5219       int full_p;
5220       sd_iterator_def sd_it;
5221       dep_t dep;
5222 
5223       tick = INSN_TICK (next);
5224       /* If TICK is not equal to INVALID_TICK, then update
5225 	 INSN_TICK of NEXT with the most recently resolved dependence
5226 	 cost.  Otherwise, recalculate it from scratch.  */
5227       full_p = (tick == INVALID_TICK);
5228 
5229       FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
5230         {
5231           rtx pro = DEP_PRO (dep);
5232           int tick1;
5233 
5234 	  gcc_assert (INSN_TICK (pro) >= MIN_TICK);
5235 
5236           tick1 = INSN_TICK (pro) + dep_cost (dep);
5237           if (tick1 > tick)
5238             tick = tick1;
5239 
5240 	  if (!full_p)
5241 	    break;
5242         }
5243     }
5244   else
5245     tick = -1;
5246 
5247   INSN_TICK (next) = tick;
5248 
5249   delay = tick - clock_var;
5250   if (delay <= 0 || sched_pressure_p)
5251     delay = QUEUE_READY;
5252 
5253   change_queue_index (next, delay);
5254 
5255   return delay;
5256 }
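
/* A worked example for fix_tick_ready (illustrative numbers): with two
   resolved producers P1 (INSN_TICK == 4, dep cost 2) and P2 (INSN_TICK
   == 6, dep cost 1), a full recomputation yields tick == MAX (4 + 2,
   6 + 1) == 7.  With CLOCK_VAR == 5 the insn is queued for 7 - 5 == 2
   cycles; a delay <= 0 (or register-pressure scheduling) would have
   sent it straight to the ready list instead.  */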
5257 
5258 /* Move NEXT to the proper queue list with (DELAY >= 1),
5259    or add it to the ready list (DELAY == QUEUE_READY),
5260    or remove it from the ready and queue lists altogether (DELAY == QUEUE_NOWHERE).  */
5261 static void
5262 change_queue_index (rtx next, int delay)
5263 {
5264   int i = QUEUE_INDEX (next);
5265 
5266   gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
5267 	      && delay != 0);
5268   gcc_assert (i != QUEUE_SCHEDULED);
5269 
5270   if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
5271       || (delay < 0 && delay == i))
5272     /* We have nothing to do.  */
5273     return;
5274 
5275   /* Remove NEXT from wherever it is now.  */
5276   if (i == QUEUE_READY)
5277     ready_remove_insn (next);
5278   else if (i >= 0)
5279     queue_remove (next);
5280 
5281   /* Add it to the proper place.  */
5282   if (delay == QUEUE_READY)
5283     ready_add (readyp, next, false);
5284   else if (delay >= 1)
5285     queue_insn (next, delay, "change queue index");
5286 
5287   if (sched_verbose >= 2)
5288     {
5289       fprintf (sched_dump, ";;\t\ttick updated: insn %s",
5290 	       (*current_sched_info->print_insn) (next, 0));
5291 
5292       if (delay == QUEUE_READY)
5293 	fprintf (sched_dump, " into ready\n");
5294       else if (delay >= 1)
5295 	fprintf (sched_dump, " into queue with cost=%d\n", delay);
5296       else
5297 	fprintf (sched_dump, " removed from ready or queue lists\n");
5298     }
5299 }
5300 
5301 static int sched_ready_n_insns = -1;
5302 
5303 /* Initialize per region data structures.  */
5304 void
5305 sched_extend_ready_list (int new_sched_ready_n_insns)
5306 {
5307   int i;
5308 
5309   if (sched_ready_n_insns == -1)
5310     /* At the first call we need to initialize one more choice_stack
5311        entry.  */
5312     {
5313       i = 0;
5314       sched_ready_n_insns = 0;
5315       VEC_reserve (rtx, heap, scheduled_insns, new_sched_ready_n_insns);
5316     }
5317   else
5318     i = sched_ready_n_insns + 1;
5319 
5320   ready.veclen = new_sched_ready_n_insns + issue_rate;
5321   ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);
5322 
5323   gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
5324 
5325   ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
5326                                   sched_ready_n_insns, sizeof (*ready_try));
5327 
5328   /* We allocate +1 element to save initial state in the choice_stack[0]
5329      entry.  */
5330   choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
5331 			     new_sched_ready_n_insns + 1);
5332 
5333   for (; i <= new_sched_ready_n_insns; i++)
5334     {
5335       choice_stack[i].state = xmalloc (dfa_state_size);
5336 
5337       if (targetm.sched.first_cycle_multipass_init)
5338 	targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
5339 						    .target_data));
5340     }
5341 
5342   sched_ready_n_insns = new_sched_ready_n_insns;
5343 }
5344 
5345 /* Free per region data structures.  */
5346 void
5347 sched_finish_ready_list (void)
5348 {
5349   int i;
5350 
5351   free (ready.vec);
5352   ready.vec = NULL;
5353   ready.veclen = 0;
5354 
5355   free (ready_try);
5356   ready_try = NULL;
5357 
5358   for (i = 0; i <= sched_ready_n_insns; i++)
5359     {
5360       if (targetm.sched.first_cycle_multipass_fini)
5361 	targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
5362 						    .target_data));
5363 
5364       free (choice_stack [i].state);
5365     }
5366   free (choice_stack);
5367   choice_stack = NULL;
5368 
5369   sched_ready_n_insns = -1;
5370 }
5371 
5372 static int
5373 haifa_luid_for_non_insn (rtx x)
5374 {
5375   gcc_assert (NOTE_P (x) || LABEL_P (x));
5376 
5377   return 0;
5378 }
5379 
5380 /* Generates recovery code for INSN.  */
5381 static void
5382 generate_recovery_code (rtx insn)
5383 {
5384   if (TODO_SPEC (insn) & BEGIN_SPEC)
5385     begin_speculative_block (insn);
5386 
5387   /* Here we have an insn with no dependencies on
5388      instructions other than CHECK_SPEC ones.  */
5389 
5390   if (TODO_SPEC (insn) & BE_IN_SPEC)
5391     add_to_speculative_block (insn);
5392 }
5393 
5394 /* Helper function.
5395    Try to add speculative dependencies of type FS between TWIN and the
5396    consumers of INSN's forward dependencies.  */
5397 static void
5398 process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
5399 {
5400   sd_iterator_def sd_it;
5401   dep_t dep;
5402 
5403   FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
5404     {
5405       ds_t ds;
5406       rtx consumer;
5407 
5408       consumer = DEP_CON (dep);
5409 
5410       ds = DEP_STATUS (dep);
5411 
5412       if (/* If we want to create speculative dep.  */
5413 	  fs
5414 	  /* And we can do that because this is a true dep.  */
5415 	  && (ds & DEP_TYPES) == DEP_TRUE)
5416 	{
5417 	  gcc_assert (!(ds & BE_IN_SPEC));
5418 
5419 	  if (/* If this dep can be overcome with 'begin speculation'.  */
5420 	      ds & BEGIN_SPEC)
5421 	    /* Then we have a choice: keep the dep 'begin speculative'
5422 	       or transform it into 'be in speculative'.  */
5423 	    {
5424 	      if (/* In try_ready we assert that if insn once became ready
5425 		     it can be removed from the ready (or queue) list only
5426 		     due to a backend decision.  Hence we can't let the
5427 		     probability of the speculative dep decrease.  */
5428 		  ds_weak (ds) <= ds_weak (fs))
5429 		{
5430 		  ds_t new_ds;
5431 
5432 		  new_ds = (ds & ~BEGIN_SPEC) | fs;
5433 
5434 		  if (/* consumer can 'be in speculative'.  */
5435 		      sched_insn_is_legitimate_for_speculation_p (consumer,
5436 								  new_ds))
5437 		    /* Transform it to be in speculative.  */
5438 		    ds = new_ds;
5439 		}
5440 	    }
5441 	  else
5442 	    /* Mark the dep as 'be in speculative'.  */
5443 	    ds |= fs;
5444 	}
5445 
5446       {
5447 	dep_def _new_dep, *new_dep = &_new_dep;
5448 
5449 	init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
5450 	sd_add_dep (new_dep, false);
5451       }
5452     }
5453 }
5454 
5455 /* Generates recovery code for BEGIN speculative INSN.  */
5456 static void
5457 begin_speculative_block (rtx insn)
5458 {
5459   if (TODO_SPEC (insn) & BEGIN_DATA)
5460     nr_begin_data++;
5461   if (TODO_SPEC (insn) & BEGIN_CONTROL)
5462     nr_begin_control++;
5463 
5464   create_check_block_twin (insn, false);
5465 
5466   TODO_SPEC (insn) &= ~BEGIN_SPEC;
5467 }
5468 
5469 static void haifa_init_insn (rtx);
5470 
5471 /* Generates recovery code for BE_IN speculative INSN.  */
5472 static void
5473 add_to_speculative_block (rtx insn)
5474 {
5475   ds_t ts;
5476   sd_iterator_def sd_it;
5477   dep_t dep;
5478   rtx twins = NULL;
5479   rtx_vec_t priorities_roots;
5480 
5481   ts = TODO_SPEC (insn);
5482   gcc_assert (!(ts & ~BE_IN_SPEC));
5483 
5484   if (ts & BE_IN_DATA)
5485     nr_be_in_data++;
5486   if (ts & BE_IN_CONTROL)
5487     nr_be_in_control++;
5488 
5489   TODO_SPEC (insn) &= ~BE_IN_SPEC;
5490   gcc_assert (!TODO_SPEC (insn));
5491 
5492   DONE_SPEC (insn) |= ts;
5493 
5494   /* First we convert all simple checks to branchy ones.  */
5495   for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
5496        sd_iterator_cond (&sd_it, &dep);)
5497     {
5498       rtx check = DEP_PRO (dep);
5499 
5500       if (IS_SPECULATION_SIMPLE_CHECK_P (check))
5501 	{
5502 	  create_check_block_twin (check, true);
5503 
5504 	  /* Restart search.  */
5505 	  sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
5506 	}
5507       else
5508 	/* Continue search.  */
5509 	sd_iterator_next (&sd_it);
5510     }
5511 
5512   priorities_roots = NULL;
5513   clear_priorities (insn, &priorities_roots);
5514 
5515   while (1)
5516     {
5517       rtx check, twin;
5518       basic_block rec;
5519 
5520       /* Get the first backward dependency of INSN.  */
5521       sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
5522       if (!sd_iterator_cond (&sd_it, &dep))
5523 	/* INSN has no backward dependencies left.  */
5524 	break;
5525 
5526       gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
5527 		  && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
5528 		  && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
5529 
5530       check = DEP_PRO (dep);
5531 
5532       gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
5533 		  && QUEUE_INDEX (check) == QUEUE_NOWHERE);
5534 
5535       rec = BLOCK_FOR_INSN (check);
5536 
5537       twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
5538       haifa_init_insn (twin);
5539 
5540       sd_copy_back_deps (twin, insn, true);
5541 
5542       if (sched_verbose && spec_info->dump)
5543         /* INSN_BB (insn) isn't determined for twin insns yet.
5544            So we can't use current_sched_info->print_insn.  */
5545         fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
5546                  INSN_UID (twin), rec->index);
5547 
5548       twins = alloc_INSN_LIST (twin, twins);
5549 
5550       /* Add dependences between TWIN and all appropriate
5551 	 instructions from REC.  */
5552       FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
5553 	{
5554 	  rtx pro = DEP_PRO (dep);
5555 
5556 	  gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
5557 
5558 	  /* INSN might have dependencies on instructions from
5559 	     several recovery blocks.  At this iteration we process those
5560 	     producers that reside in REC.  */
5561 	  if (BLOCK_FOR_INSN (pro) == rec)
5562 	    {
5563 	      dep_def _new_dep, *new_dep = &_new_dep;
5564 
5565 	      init_dep (new_dep, pro, twin, REG_DEP_TRUE);
5566 	      sd_add_dep (new_dep, false);
5567 	    }
5568 	}
5569 
5570       process_insn_forw_deps_be_in_spec (insn, twin, ts);
5571 
5572       /* Remove all dependencies between INSN and insns in REC.  */
5573       for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
5574 	   sd_iterator_cond (&sd_it, &dep);)
5575 	{
5576 	  rtx pro = DEP_PRO (dep);
5577 
5578 	  if (BLOCK_FOR_INSN (pro) == rec)
5579 	    sd_delete_dep (sd_it);
5580 	  else
5581 	    sd_iterator_next (&sd_it);
5582 	}
5583     }
5584 
5585   /* We couldn't have added the dependencies between INSN and TWINS earlier
5586      because that would make TWINS appear in the INSN_BACK_DEPS (INSN).  */
5587   while (twins)
5588     {
5589       rtx twin;
5590 
5591       twin = XEXP (twins, 0);
5592 
5593       {
5594 	dep_def _new_dep, *new_dep = &_new_dep;
5595 
5596 	init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
5597 	sd_add_dep (new_dep, false);
5598       }
5599 
5600       twin = XEXP (twins, 1);
5601       free_INSN_LIST_node (twins);
5602       twins = twin;
5603     }
5604 
5605   calc_priorities (priorities_roots);
5606   VEC_free (rtx, heap, priorities_roots);
5607 }
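
/* Recap of add_to_speculative_block: for every recovery block REC that
   contains producers of INSN, a twin of INSN is emitted near the end
   of REC; the twin gets true dependencies on those producers and
   re-creates INSN's forward dependencies (turned into BE_IN
   speculative ones where possible), while INSN's speculative back
   dependencies on REC are deleted.  Output dependencies from INSN to
   the twins are added only after the loop, as explained above.  */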
5608 
5609 /* Extend the array pointed to by P, zero-filling only the new part.  */
5610 void *
5611 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
5612 {
5613   gcc_assert (new_nmemb >= old_nmemb);
5614   p = XRESIZEVAR (void, p, new_nmemb * size);
5615   memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
5616   return p;
5617 }
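
/* A minimal usage sketch for xrecalloc (hypothetical, not part of the
   pass; the function name below is invented): allocate four zeroed
   bytes, then grow to eight.  Only the new tail is cleared; the old
   contents are preserved.  */
#if 0
static void
xrecalloc_example (void)
{
  char *flags;

  /* Initial allocation: growing from 0 elements zeroes everything.  */
  flags = (char *) xrecalloc (NULL, 4, 0, sizeof (*flags));
  flags[0] = 1;
  /* Grow from 4 to 8 elements; flags[4..7] are zero-filled.  */
  flags = (char *) xrecalloc (flags, 8, 4, sizeof (*flags));
  gcc_assert (flags[0] == 1 && flags[7] == 0);
  free (flags);
}
#endif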
5618 
5619 /* Helper function.
5620    Find fallthru edge from PRED.  */
5621 edge
5622 find_fallthru_edge_from (basic_block pred)
5623 {
5624   edge e;
5625   basic_block succ;
5626 
5627   succ = pred->next_bb;
5628   gcc_assert (succ->prev_bb == pred);
5629 
5630   if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
5631     {
5632       e = find_fallthru_edge (pred->succs);
5633 
5634       if (e)
5635 	{
5636 	  gcc_assert (e->dest == succ);
5637 	  return e;
5638 	}
5639     }
5640   else
5641     {
5642       e = find_fallthru_edge (succ->preds);
5643 
5644       if (e)
5645 	{
5646 	  gcc_assert (e->src == pred);
5647 	  return e;
5648 	}
5649     }
5650 
5651   return NULL;
5652 }
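
/* Note that the helper above scans whichever of PRED's successor list
   and SUCC's predecessor list is shorter, so the lookup costs
   O (MIN (EDGE_COUNT (pred->succs), EDGE_COUNT (succ->preds))).  */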
5653 
5654 /* Extend per basic block data structures.  */
5655 static void
5656 sched_extend_bb (void)
5657 {
5658   rtx insn;
5659 
5660   /* The following is done to keep current_sched_info->next_tail non-null.  */
5661   insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
5662   if (NEXT_INSN (insn) == 0
5663       || (!NOTE_P (insn)
5664 	  && !LABEL_P (insn)
5665 	  /* Don't emit a NOTE if it would end up before a BARRIER.  */
5666 	  && !BARRIER_P (NEXT_INSN (insn))))
5667     {
5668       rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
5669       /* Make the note appear outside any BB.  */
5670       set_block_for_insn (note, NULL);
5671       BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
5672     }
5673 }
5674 
5675 /* Init per basic block data structures.  */
5676 void
5677 sched_init_bbs (void)
5678 {
5679   sched_extend_bb ();
5680 }
5681 
5682 /* Initialize BEFORE_RECOVERY variable.  */
5683 static void
5684 init_before_recovery (basic_block *before_recovery_ptr)
5685 {
5686   basic_block last;
5687   edge e;
5688 
5689   last = EXIT_BLOCK_PTR->prev_bb;
5690   e = find_fallthru_edge_from (last);
5691 
5692   if (e)
5693     {
5694       /* We create two basic blocks:
5695          1. A single-instruction block that is inserted right after E->SRC
5696          and jumps to
5697          2. An empty block placed right before EXIT_BLOCK.
5698          Recovery blocks will be emitted between these two blocks.  */
5699 
5700       basic_block single, empty;
5701       rtx x, label;
5702 
5703       /* If the fallthrough edge to exit that we've found comes from the
5704 	 block we created before, don't do anything more.  */
5705       if (last == after_recovery)
5706 	return;
5707 
5708       adding_bb_to_current_region_p = false;
5709 
5710       single = sched_create_empty_bb (last);
5711       empty = sched_create_empty_bb (single);
5712 
5713       /* Add new blocks to the root loop.  */
5714       if (current_loops != NULL)
5715 	{
5716 	  add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
5717 	  add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
5718 	}
5719 
5720       single->count = last->count;
5721       empty->count = last->count;
5722       single->frequency = last->frequency;
5723       empty->frequency = last->frequency;
5724       BB_COPY_PARTITION (single, last);
5725       BB_COPY_PARTITION (empty, last);
5726 
5727       redirect_edge_succ (e, single);
5728       make_single_succ_edge (single, empty, 0);
5729       make_single_succ_edge (empty, EXIT_BLOCK_PTR,
5730 			     EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
5731 
5732       label = block_label (empty);
5733       x = emit_jump_insn_after (gen_jump (label), BB_END (single));
5734       JUMP_LABEL (x) = label;
5735       LABEL_NUSES (label)++;
5736       haifa_init_insn (x);
5737 
5738       emit_barrier_after (x);
5739 
5740       sched_init_only_bb (empty, NULL);
5741       sched_init_only_bb (single, NULL);
5742       sched_extend_bb ();
5743 
5744       adding_bb_to_current_region_p = true;
5745       before_recovery = single;
5746       after_recovery = empty;
5747 
5748       if (before_recovery_ptr)
5749         *before_recovery_ptr = before_recovery;
5750 
5751       if (sched_verbose >= 2 && spec_info->dump)
5752         fprintf (spec_info->dump,
5753 		 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
5754                  last->index, single->index, empty->index);
5755     }
5756   else
5757     before_recovery = last;
5758 }
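
/* The CFG transformation performed above, as a picture (cf. the
   "Fixed fallthru to EXIT" dump line):

     before:  last --(fallthru)--> EXIT

     after:   last --> single --(jump)--> empty --(fallthru)--> EXIT

   Recovery blocks are later emitted between SINGLE (before_recovery)
   and EMPTY (after_recovery).  */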
5759 
5760 /* Return the new recovery block.  */
5761 basic_block
5762 sched_create_recovery_block (basic_block *before_recovery_ptr)
5763 {
5764   rtx label;
5765   rtx barrier;
5766   basic_block rec;
5767 
5768   haifa_recovery_bb_recently_added_p = true;
5769   haifa_recovery_bb_ever_added_p = true;
5770 
5771   init_before_recovery (before_recovery_ptr);
5772 
5773   barrier = get_last_bb_insn (before_recovery);
5774   gcc_assert (BARRIER_P (barrier));
5775 
5776   label = emit_label_after (gen_label_rtx (), barrier);
5777 
5778   rec = create_basic_block (label, label, before_recovery);
5779 
5780   /* A recovery block always ends with an unconditional jump.  */
5781   emit_barrier_after (BB_END (rec));
5782 
5783   if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
5784     BB_SET_PARTITION (rec, BB_COLD_PARTITION);
5785 
5786   if (sched_verbose && spec_info->dump)
5787     fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
5788              rec->index);
5789 
5790   return rec;
5791 }
5792 
5793 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
5794    and emit necessary jumps.  */
5795 void
5796 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
5797 			     basic_block second_bb)
5798 {
5799   rtx label;
5800   rtx jump;
5801   int edge_flags;
5802 
5803   /* This fixes the incoming edge.  */
5804   /* ??? Which other flags should be specified?  */
5805   if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
5806     /* The partition types are the same if both are "unpartitioned".  */
5807     edge_flags = EDGE_CROSSING;
5808   else
5809     edge_flags = 0;
5810 
5811   make_edge (first_bb, rec, edge_flags);
5812   label = block_label (second_bb);
5813   jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
5814   JUMP_LABEL (jump) = label;
5815   LABEL_NUSES (label)++;
5816 
5817   if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
5818     /* The partition types are the same if both are "unpartitioned".  */
5819     {
5820       /* Rewritten from cfgrtl.c.  */
5821       if (flag_reorder_blocks_and_partition
5822 	  && targetm_common.have_named_sections)
5823 	{
5824 	  /* We don't need the same note for the check because
5825 	     any_condjump_p (check) == true.  */
5826 	  add_reg_note (jump, REG_CROSSING_JUMP, NULL_RTX);
5827 	}
5828       edge_flags = EDGE_CROSSING;
5829     }
5830   else
5831     edge_flags = 0;
5832 
5833   make_single_succ_edge (rec, second_bb, edge_flags);
5834   if (dom_info_available_p (CDI_DOMINATORS))
5835     set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
5836 }
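
/* The resulting shape, as a picture (see the header comment above):

     first_bb --(check jump)--> rec --(jump)--> second_bb
     first_bb ------------(fallthru)----------> second_bb

   Either new edge is marked EDGE_CROSSING when it connects blocks in
   different partitions.  */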
5837 
5838 /* This function creates recovery code for INSN.  If MUTATE_P is nonzero,
5839    INSN is a simple check that should be converted to a branchy one.  */
5840 static void
5841 create_check_block_twin (rtx insn, bool mutate_p)
5842 {
5843   basic_block rec;
5844   rtx label, check, twin;
5845   ds_t fs;
5846   sd_iterator_def sd_it;
5847   dep_t dep;
5848   dep_def _new_dep, *new_dep = &_new_dep;
5849   ds_t todo_spec;
5850 
5851   gcc_assert (ORIG_PAT (insn) != NULL_RTX);
5852 
5853   if (!mutate_p)
5854     todo_spec = TODO_SPEC (insn);
5855   else
5856     {
5857       gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
5858 		  && (TODO_SPEC (insn) & SPECULATIVE) == 0);
5859 
5860       todo_spec = CHECK_SPEC (insn);
5861     }
5862 
5863   todo_spec &= SPECULATIVE;
5864 
5865   /* Create recovery block.  */
5866   if (mutate_p || targetm.sched.needs_block_p (todo_spec))
5867     {
5868       rec = sched_create_recovery_block (NULL);
5869       label = BB_HEAD (rec);
5870     }
5871   else
5872     {
5873       rec = EXIT_BLOCK_PTR;
5874       label = NULL_RTX;
5875     }
5876 
5877   /* Emit CHECK.  */
5878   check = targetm.sched.gen_spec_check (insn, label, todo_spec);
5879 
5880   if (rec != EXIT_BLOCK_PTR)
5881     {
5882       /* To have mem_reg alive at the beginning of second_bb,
5883 	 we emit the check BEFORE insn, so that after splitting,
5884 	 insn will be at the beginning of second_bb, which will
5885 	 provide us with the correct liveness information.  */
5886       check = emit_jump_insn_before (check, insn);
5887       JUMP_LABEL (check) = label;
5888       LABEL_NUSES (label)++;
5889     }
5890   else
5891     check = emit_insn_before (check, insn);
5892 
5893   /* Extend data structures.  */
5894   haifa_init_insn (check);
5895 
5896   /* CHECK is being added to the current region.  Extend the ready list.  */
5897   gcc_assert (sched_ready_n_insns != -1);
5898   sched_extend_ready_list (sched_ready_n_insns + 1);
5899 
5900   if (current_sched_info->add_remove_insn)
5901     current_sched_info->add_remove_insn (insn, 0);
5902 
5903   RECOVERY_BLOCK (check) = rec;
5904 
5905   if (sched_verbose && spec_info->dump)
5906     fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
5907              (*current_sched_info->print_insn) (check, 0));
5908 
5909   gcc_assert (ORIG_PAT (insn));
5910 
5911   /* Initialize TWIN (twin is a duplicate of the original instruction
5912      in the recovery block).  */
5913   if (rec != EXIT_BLOCK_PTR)
5914     {
5915       sd_iterator_def sd_it;
5916       dep_t dep;
5917 
5918       FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
5919 	if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
5920 	  {
5921 	    struct _dep _dep2, *dep2 = &_dep2;
5922 
5923 	    init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
5924 
5925 	    sd_add_dep (dep2, true);
5926 	  }
5927 
5928       twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
5929       haifa_init_insn (twin);
5930 
5931       if (sched_verbose && spec_info->dump)
5932 	/* INSN_BB (insn) isn't determined for twin insns yet.
5933 	   So we can't use current_sched_info->print_insn.  */
5934 	fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
5935 		 INSN_UID (twin), rec->index);
5936     }
5937   else
5938     {
5939       ORIG_PAT (check) = ORIG_PAT (insn);
5940       HAS_INTERNAL_DEP (check) = 1;
5941       twin = check;
5942       /* ??? We probably should change all OUTPUT dependencies to
5943 	 (TRUE | OUTPUT).  */
5944     }
5945 
5946   /* Copy all resolved back dependencies of INSN to TWIN.  This will
5947      provide correct value for INSN_TICK (TWIN).  */
5948   sd_copy_back_deps (twin, insn, true);
5949 
5950   if (rec != EXIT_BLOCK_PTR)
5951     /* In case of branchy check, fix CFG.  */
5952     {
5953       basic_block first_bb, second_bb;
5954       rtx jump;
5955 
5956       first_bb = BLOCK_FOR_INSN (check);
5957       second_bb = sched_split_block (first_bb, check);
5958 
5959       sched_create_recovery_edges (first_bb, rec, second_bb);
5960 
5961       sched_init_only_bb (second_bb, first_bb);
5962       sched_init_only_bb (rec, EXIT_BLOCK_PTR);
5963 
5964       jump = BB_END (rec);
5965       haifa_init_insn (jump);
5966     }
5967 
5968   /* Move backward dependences from INSN to CHECK and
5969      move forward dependences from INSN to TWIN.  */
5970 
5971   /* First, create dependencies between INSN's producers and CHECK & TWIN.  */
5972   FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
5973     {
5974       rtx pro = DEP_PRO (dep);
5975       ds_t ds;
5976 
5977       /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
5978 	 check --TRUE--> producer  ??? or ANTI ???
5979 	 twin  --TRUE--> producer
5980 	 twin  --ANTI--> check
5981 
5982 	 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
5983 	 check --ANTI--> producer
5984 	 twin  --ANTI--> producer
5985 	 twin  --ANTI--> check
5986 
5987 	 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
5988 	 check ~~TRUE~~> producer
5989 	 twin  ~~TRUE~~> producer
5990 	 twin  --ANTI--> check  */
5991 
5992       ds = DEP_STATUS (dep);
5993 
5994       if (ds & BEGIN_SPEC)
5995 	{
5996 	  gcc_assert (!mutate_p);
5997 	  ds &= ~BEGIN_SPEC;
5998 	}
5999 
6000       init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
6001       sd_add_dep (new_dep, false);
6002 
6003       if (rec != EXIT_BLOCK_PTR)
6004 	{
6005 	  DEP_CON (new_dep) = twin;
6006 	  sd_add_dep (new_dep, false);
6007 	}
6008     }
6009 
6010   /* Second, remove backward dependencies of INSN.  */
6011   for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
6012        sd_iterator_cond (&sd_it, &dep);)
6013     {
6014       if ((DEP_STATUS (dep) & BEGIN_SPEC)
6015 	  || mutate_p)
6016 	/* We can delete this dep because we overcome it with
6017 	   BEGIN_SPECULATION.  */
6018 	sd_delete_dep (sd_it);
6019       else
6020 	sd_iterator_next (&sd_it);
6021     }
6022 
6023   /* Future Speculations.  Determine what BE_IN speculations will be like.  */
6024   fs = 0;
6025 
6026   /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
6027      here.  */
6028 
6029   gcc_assert (!DONE_SPEC (insn));
6030 
6031   if (!mutate_p)
6032     {
6033       ds_t ts = TODO_SPEC (insn);
6034 
6035       DONE_SPEC (insn) = ts & BEGIN_SPEC;
6036       CHECK_SPEC (check) = ts & BEGIN_SPEC;
6037 
6038       /* The probability of future speculations depends solely on the
6039 	 initial BEGIN speculation.  */
6040       if (ts & BEGIN_DATA)
6041 	fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
6042       if (ts & BEGIN_CONTROL)
6043 	fs = set_dep_weak (fs, BE_IN_CONTROL,
6044 			   get_dep_weak (ts, BEGIN_CONTROL));
6045     }
6046   else
6047     CHECK_SPEC (check) = CHECK_SPEC (insn);
6048 
6049   /* Future speculations: call the helper.  */
6050   process_insn_forw_deps_be_in_spec (insn, twin, fs);
6051 
6052   if (rec != EXIT_BLOCK_PTR)
6053     {
6054       /* Which types of dependencies to use here is, generally, a
6055 	 machine-dependent question...  But, for now, it is not.  */
6057 
6058       if (!mutate_p)
6059 	{
6060 	  init_dep (new_dep, insn, check, REG_DEP_TRUE);
6061 	  sd_add_dep (new_dep, false);
6062 
6063 	  init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
6064 	  sd_add_dep (new_dep, false);
6065 	}
6066       else
6067 	{
6068 	  if (spec_info->dump)
6069 	    fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
6070 		     (*current_sched_info->print_insn) (insn, 0));
6071 
6072 	  /* Remove all dependencies of the INSN.  */
6073 	  {
6074 	    sd_it = sd_iterator_start (insn, (SD_LIST_FORW
6075 					      | SD_LIST_BACK
6076 					      | SD_LIST_RES_BACK));
6077 	    while (sd_iterator_cond (&sd_it, &dep))
6078 	      sd_delete_dep (sd_it);
6079 	  }
6080 
6081 	  /* If the former check (INSN) was already moved to the ready (or
6082 	     queue) list, add the new check (CHECK) there too.  */
6083 	  if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
6084 	    try_ready (check);
6085 
6086 	  /* Remove old check from instruction stream and free its
6087 	     data.  */
6088 	  sched_remove_insn (insn);
6089 	}
6090 
6091       init_dep (new_dep, check, twin, REG_DEP_ANTI);
6092       sd_add_dep (new_dep, false);
6093     }
6094   else
6095     {
6096       init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
6097       sd_add_dep (new_dep, false);
6098     }
6099 
6100   if (!mutate_p)
6101     /* Fix priorities.  If MUTATE_P is nonzero, this is not necessary,
6102        because it'll be done later in add_to_speculative_block.  */
6103     {
6104       rtx_vec_t priorities_roots = NULL;
6105 
6106       clear_priorities (twin, &priorities_roots);
6107       calc_priorities (priorities_roots);
6108       VEC_free (rtx, heap, priorities_roots);
6109     }
6110 }
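
/* Recap of the dependence surgery above (for the non-mutating paths).
   INSN's producers now feed both CHECK and, for a branchy check, TWIN;
   the back dependencies overcome by BEGIN speculation are deleted from
   INSN.  Afterwards, for a branchy check, CHECK depends (TRUE) and
   TWIN depends (OUTPUT) on INSN, and TWIN carries an ANTI dependence
   on CHECK; for a simple check, where TWIN == CHECK, a single
   TRUE|OUTPUT dependence of CHECK on INSN is used instead.  */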
6111 
6112 /* Remove dependences between instructions in the recovery block REC
6113    and the usual region instructions.  Inner dependences are kept so
6114    that it won't be necessary to recompute them.  */
6115 static void
6116 fix_recovery_deps (basic_block rec)
6117 {
6118   rtx note, insn, jump, ready_list = 0;
6119   bitmap_head in_ready;
6120   rtx link;
6121 
6122   bitmap_initialize (&in_ready, 0);
6123 
6124   /* NOTE - a basic block note.  */
6125   note = NEXT_INSN (BB_HEAD (rec));
6126   gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
6127   insn = BB_END (rec);
6128   gcc_assert (JUMP_P (insn));
6129   insn = PREV_INSN (insn);
6130 
6131   do
6132     {
6133       sd_iterator_def sd_it;
6134       dep_t dep;
6135 
6136       for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
6137 	   sd_iterator_cond (&sd_it, &dep);)
6138 	{
6139 	  rtx consumer = DEP_CON (dep);
6140 
6141 	  if (BLOCK_FOR_INSN (consumer) != rec)
6142 	    {
6143 	      sd_delete_dep (sd_it);
6144 
6145 	      if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
6146 		ready_list = alloc_INSN_LIST (consumer, ready_list);
6147 	    }
6148 	  else
6149 	    {
6150 	      gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
6151 
6152 	      sd_iterator_next (&sd_it);
6153 	    }
6154 	}
6155 
6156       insn = PREV_INSN (insn);
6157     }
6158   while (insn != note);
6159 
6160   bitmap_clear (&in_ready);
6161 
6162   /* Try to add instructions to the ready or queue list.  */
6163   for (link = ready_list; link; link = XEXP (link, 1))
6164     try_ready (XEXP (link, 0));
6165   free_INSN_LIST_list (&ready_list);
6166 
6167   /* Fix the jump's dependences.  */
6168   insn = BB_HEAD (rec);
6169   jump = BB_END (rec);
6170 
6171   gcc_assert (LABEL_P (insn));
6172   insn = NEXT_INSN (insn);
6173 
6174   gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
6175   add_jump_dependencies (insn, jump);
6176 }
6177 
6178 /* Change pattern of INSN to NEW_PAT.  Invalidate cached haifa
6179    instruction data.  */
6180 static bool
6181 haifa_change_pattern (rtx insn, rtx new_pat)
6182 {
6183   sd_iterator_def sd_it;
6184   dep_t dep;
6185   int t;
6186 
6187   t = validate_change (insn, &PATTERN (insn), new_pat, 0);
6188   if (!t)
6189     return false;
6190   dfa_clear_single_insn_cache (insn);
6191 
6192   sd_it = sd_iterator_start (insn,
6193 			     SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK);
6194   while (sd_iterator_cond (&sd_it, &dep))
6195     {
6196       DEP_COST (dep) = UNKNOWN_DEP_COST;
6197       sd_iterator_next (&sd_it);
6198     }
6199 
6200   /* Invalidate INSN_COST, so it'll be recalculated.  */
6201   INSN_COST (insn) = -1;
6202   /* Invalidate INSN_TICK, so it'll be recalculated.  */
6203   INSN_TICK (insn) = INVALID_TICK;
6204   return true;
6205 }
6206 
6207 /* Return -1 if we can't speculate,
6208    0 if, for speculation with REQUEST mode, it is OK to use the
6209    current instruction pattern, or
6210    1 if the pattern must be changed to *NEW_PAT to be speculative.  */
6211 int
6212 sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
6213 {
6214   gcc_assert (current_sched_info->flags & DO_SPECULATION
6215               && (request & SPECULATIVE)
6216 	      && sched_insn_is_legitimate_for_speculation_p (insn, request));
6217 
6218   if ((request & spec_info->mask) != request)
6219     return -1;
6220 
6221   if (request & BE_IN_SPEC
6222       && !(request & BEGIN_SPEC))
6223     return 0;
6224 
6225   return targetm.sched.speculate_insn (insn, request, new_pat);
6226 }
6227 
6228 static int
6229 haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
6230 {
6231   gcc_assert (sched_deps_info->generate_spec_deps
6232 	      && !IS_SPECULATION_CHECK_P (insn));
6233 
6234   if (HAS_INTERNAL_DEP (insn)
6235       || SCHED_GROUP_P (insn))
6236     return -1;
6237 
6238   return sched_speculate_insn (insn, request, new_pat);
6239 }
6240 
6241 /* Print some information about block BB, which starts with HEAD and
6242    ends with TAIL, before scheduling it.
6243    I is zero if the scheduler is about to start with a fresh ebb.  */
6244 static void
6245 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
6246 {
6247   if (!i)
6248     fprintf (sched_dump,
6249 	     ";;   ======================================================\n");
6250   else
6251     fprintf (sched_dump,
6252 	     ";;   =====================ADVANCING TO=====================\n");
6253   fprintf (sched_dump,
6254 	   ";;   -- basic block %d from %d to %d -- %s reload\n",
6255 	   bb->index, INSN_UID (head), INSN_UID (tail),
6256 	   (reload_completed ? "after" : "before"));
6257   fprintf (sched_dump,
6258 	   ";;   ======================================================\n");
6259   fprintf (sched_dump, "\n");
6260 }
6261 
6262 /* Unlink basic block notes and labels and save them, so they
6263    can be easily restored.  We unlink basic block notes in the EBB to
6264    provide backward compatibility with the previous code, as target
6265    backends assume that there will be only instructions between
6266    current_sched_info->{head and tail}.  We restore these notes as soon
6267    as we can.
6268    FIRST (LAST) is the first (last) basic block in the ebb.
6269    NB: In the usual case (FIRST == LAST) nothing is really done.  */
6270 void
6271 unlink_bb_notes (basic_block first, basic_block last)
6272 {
6273   /* We DON'T unlink basic block notes of the first block in the ebb.  */
6274   if (first == last)
6275     return;
6276 
6277   bb_header = XNEWVEC (rtx, last_basic_block);
6278 
6279   /* Make a sentinel.  */
6280   if (last->next_bb != EXIT_BLOCK_PTR)
6281     bb_header[last->next_bb->index] = 0;
6282 
6283   first = first->next_bb;
6284   do
6285     {
6286       rtx prev, label, note, next;
6287 
6288       label = BB_HEAD (last);
6289       if (LABEL_P (label))
6290 	note = NEXT_INSN (label);
6291       else
6292 	note = label;
6293       gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
6294 
6295       prev = PREV_INSN (label);
6296       next = NEXT_INSN (note);
6297       gcc_assert (prev && next);
6298 
6299       NEXT_INSN (prev) = next;
6300       PREV_INSN (next) = prev;
6301 
6302       bb_header[last->index] = label;
6303 
6304       if (last == first)
6305 	break;
6306 
6307       last = last->prev_bb;
6308     }
6309   while (1);
6310 }
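
/* Pictorially, for each unlinked block the insn chain goes from

     prev <-> label <-> bb-note <-> next

   to

     prev <-> next

   while bb_header[bb->index] keeps the LABEL (or the note itself when
   there is no label), so restore_bb_notes below can splice the pair
   back in.  */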
6311 
6312 /* Restore basic block notes.
6313    FIRST is the first basic block in the ebb.  */
6314 static void
6315 restore_bb_notes (basic_block first)
6316 {
6317   if (!bb_header)
6318     return;
6319 
6320   /* We DON'T unlink basic block notes of the first block in the ebb.  */
6321   first = first->next_bb;
6322   /* Remember: FIRST is actually the second basic block in the ebb.  */
6323 
6324   while (first != EXIT_BLOCK_PTR
6325 	 && bb_header[first->index])
6326     {
6327       rtx prev, label, note, next;
6328 
6329       label = bb_header[first->index];
6330       prev = PREV_INSN (label);
6331       next = NEXT_INSN (prev);
6332 
6333       if (LABEL_P (label))
6334 	note = NEXT_INSN (label);
6335       else
6336 	note = label;
6337       gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
6338 
6339       bb_header[first->index] = 0;
6340 
6341       NEXT_INSN (prev) = label;
6342       NEXT_INSN (note) = next;
6343       PREV_INSN (next) = note;
6344 
6345       first = first->next_bb;
6346     }
6347 
6348   free (bb_header);
6349   bb_header = 0;
6350 }
6351 
6352 /* Helper function.
6353    Fix CFG after both in- and inter-block movement of
6354    control_flow_insn_p JUMP.  */
6355 static void
6356 fix_jump_move (rtx jump)
6357 {
6358   basic_block bb, jump_bb, jump_bb_next;
6359 
6360   bb = BLOCK_FOR_INSN (PREV_INSN (jump));
6361   jump_bb = BLOCK_FOR_INSN (jump);
6362   jump_bb_next = jump_bb->next_bb;
6363 
6364   gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
6365 	      || IS_SPECULATION_BRANCHY_CHECK_P (jump));
6366 
6367   if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
6368     /* If jump_bb_next is not empty.  */
6369     BB_END (jump_bb) = BB_END (jump_bb_next);
6370 
6371   if (BB_END (bb) != PREV_INSN (jump))
6372     /* Then there are instructions after jump that should be moved
6373        to jump_bb_next.  */
6374     BB_END (jump_bb_next) = BB_END (bb);
6375   else
6376     /* Otherwise jump_bb_next is empty.  */
6377     BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
6378 
6379   /* To make assertion in move_insn happy.  */
6380   BB_END (bb) = PREV_INSN (jump);
6381 
6382   update_bb_for_insn (jump_bb_next);
6383 }
6384 
6385 /* Fix CFG after interblock movement of control_flow_insn_p JUMP.  */
6386 static void
6387 move_block_after_check (rtx jump)
6388 {
6389   basic_block bb, jump_bb, jump_bb_next;
6390   VEC(edge,gc) *t;
6391 
6392   bb = BLOCK_FOR_INSN (PREV_INSN (jump));
6393   jump_bb = BLOCK_FOR_INSN (jump);
6394   jump_bb_next = jump_bb->next_bb;
6395 
6396   update_bb_for_insn (jump_bb);
6397 
6398   gcc_assert (IS_SPECULATION_CHECK_P (jump)
6399 	      || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
6400 
6401   unlink_block (jump_bb_next);
6402   link_block (jump_bb_next, bb);
6403 
6404   t = bb->succs;
6405   bb->succs = 0;
6406   move_succs (&(jump_bb->succs), bb);
6407   move_succs (&(jump_bb_next->succs), jump_bb);
6408   move_succs (&t, jump_bb_next);
6409 
6410   df_mark_solutions_dirty ();
6411 
6412   common_sched_info->fix_recovery_cfg
6413     (bb->index, jump_bb->index, jump_bb_next->index);
6414 }
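
/* The three move_succs calls above rotate the successor vectors:
   BB takes over JUMP_BB's successors, JUMP_BB takes JUMP_BB_NEXT's,
   and JUMP_BB_NEXT receives BB's original ones (saved in T).  */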
6415 
6416 /* Helper function for move_block_after_check.
6417    This function attaches the edge vector pointed to by SUCCSP to
6418    block TO.  */
6419 static void
6420 move_succs (VEC(edge,gc) **succsp, basic_block to)
6421 {
6422   edge e;
6423   edge_iterator ei;
6424 
6425   gcc_assert (to->succs == 0);
6426 
6427   to->succs = *succsp;
6428 
6429   FOR_EACH_EDGE (e, ei, to->succs)
6430     e->src = to;
6431 
6432   *succsp = 0;
6433 }
6434 
6435 /* Remove INSN from the instruction stream.
6436    INSN should not have any dependencies.  */
6437 static void
6438 sched_remove_insn (rtx insn)
6439 {
6440   sd_finish_insn (insn);
6441 
6442   change_queue_index (insn, QUEUE_NOWHERE);
6443   current_sched_info->add_remove_insn (insn, 1);
6444   remove_insn (insn);
6445 }
6446 
6447 /* Clear priorities of all instructions that are forward dependent on INSN.
6448    Store in the vector pointed to by ROOTS_PTR the insns on which priority ()
6449    should be invoked to initialize all cleared priorities.  */
6450 static void
6451 clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
6452 {
6453   sd_iterator_def sd_it;
6454   dep_t dep;
6455   bool insn_is_root_p = true;
6456 
6457   gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
6458 
6459   FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
6460     {
6461       rtx pro = DEP_PRO (dep);
6462 
6463       if (INSN_PRIORITY_STATUS (pro) >= 0
6464 	  && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
6465 	{
6466 	  /* If DEP doesn't contribute to priority then INSN itself should
6467 	     be added to priority roots.  */
6468 	  if (contributes_to_priority_p (dep))
6469 	    insn_is_root_p = false;
6470 
6471 	  INSN_PRIORITY_STATUS (pro) = -1;
6472 	  clear_priorities (pro, roots_ptr);
6473 	}
6474     }
6475 
6476   if (insn_is_root_p)
6477     VEC_safe_push (rtx, heap, *roots_ptr, insn);
6478 }
6479 
6480 /* Recompute priorities of instructions whose priorities might have been
6481    changed.  ROOTS is a vector of instructions whose priority computation will
6482    trigger initialization of all cleared priorities.  */
6483 static void
6484 calc_priorities (rtx_vec_t roots)
6485 {
6486   int i;
6487   rtx insn;
6488 
6489   FOR_EACH_VEC_ELT (rtx, roots, i, insn)
6490     priority (insn);
6491 }
6492 
6493 
6494 /* Add dependences between JUMP and other instructions in the recovery
6495    block.  INSN is the first insn in the recovery block.  */
6496 static void
6497 add_jump_dependencies (rtx insn, rtx jump)
6498 {
6499   do
6500     {
6501       insn = NEXT_INSN (insn);
6502       if (insn == jump)
6503 	break;
6504 
6505       if (dep_list_size (insn) == 0)
6506 	{
6507 	  dep_def _new_dep, *new_dep = &_new_dep;
6508 
6509 	  init_dep (new_dep, insn, jump, REG_DEP_ANTI);
6510 	  sd_add_dep (new_dep, false);
6511 	}
6512     }
6513   while (1);
6514 
6515   gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
6516 }
6517 
6518 /* Extend data structures for logical insn UID.  */
6519 void
6520 sched_extend_luids (void)
6521 {
6522   int new_luids_max_uid = get_max_uid () + 1;
6523 
6524   VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid);
6525 }
6526 
6527 /* Initialize LUID for INSN.  */
6528 void
6529 sched_init_insn_luid (rtx insn)
6530 {
6531   int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
6532   int luid;
6533 
6534   if (i >= 0)
6535     {
6536       luid = sched_max_luid;
6537       sched_max_luid += i;
6538     }
6539   else
6540     luid = -1;
6541 
6542   SET_INSN_LUID (insn, luid);
6543 }
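
/* Note the effect of the hook above: an INSN_P insn advances
   sched_max_luid by 1, while a note or label (for which
   haifa_luid_for_non_insn returns 0) is assigned the current
   sched_max_luid without advancing it, i.e. it shares the luid of the
   next real insn that gets initialized.  */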
6544 
6545 /* Initialize luids for BBS.
6546    The hook common_sched_info->luid_for_non_insn () is used to determine
6547    if notes, labels, etc. need luids.  */
6548 void
6549 sched_init_luids (bb_vec_t bbs)
6550 {
6551   int i;
6552   basic_block bb;
6553 
6554   sched_extend_luids ();
6555   FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
6556     {
6557       rtx insn;
6558 
6559       FOR_BB_INSNS (bb, insn)
6560 	sched_init_insn_luid (insn);
6561     }
6562 }
6563 
6564 /* Free LUIDs.  */
6565 void
6566 sched_finish_luids (void)
6567 {
6568   VEC_free (int, heap, sched_luids);
6569   sched_max_luid = 1;
6570 }
6571 
6572 /* Return logical uid of INSN.  Helpful while debugging.  */
6573 int
6574 insn_luid (rtx insn)
6575 {
6576   return INSN_LUID (insn);
6577 }
6578 
6579 /* Extend per insn data in the target.  */
6580 void
6581 sched_extend_target (void)
6582 {
6583   if (targetm.sched.h_i_d_extended)
6584     targetm.sched.h_i_d_extended ();
6585 }
6586 
6587 /* Extend global scheduler structures (those that live across calls to
6588    schedule_block) to include information about just-emitted insns.  */
6589 static void
6590 extend_h_i_d (void)
6591 {
6592   int reserve = (get_max_uid () + 1
6593                  - VEC_length (haifa_insn_data_def, h_i_d));
6594   if (reserve > 0
6595       && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
6596     {
6597       VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
6598                              3 * get_max_uid () / 2);
6599       sched_extend_target ();
6600     }
6601 }
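
/* Growth arithmetic, with illustrative numbers: if get_max_uid () is
   100 and h_i_d currently holds 80 entries, RESERVE is 21, and the
   vector is grown (and zeroed) up to 3 * 100 / 2 == 150 entries, so
   repeated single-insn extensions stay amortized.  */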
6602 
6603 /* Initialize the h_i_d entry of INSN with default values.
6604    Values that are not explicitly initialized here hold zero.  */
6605 static void
6606 init_h_i_d (rtx insn)
6607 {
6608   if (INSN_LUID (insn) > 0)
6609     {
6610       INSN_COST (insn) = -1;
6611       QUEUE_INDEX (insn) = QUEUE_NOWHERE;
6612       INSN_TICK (insn) = INVALID_TICK;
6613       INSN_EXACT_TICK (insn) = INVALID_TICK;
6614       INTER_TICK (insn) = INVALID_TICK;
6615       TODO_SPEC (insn) = HARD_DEP;
6616     }
6617 }
6618 
6619 /* Initialize haifa_insn_data for BBS.  */
6620 void
6621 haifa_init_h_i_d (bb_vec_t bbs)
6622 {
6623   int i;
6624   basic_block bb;
6625 
6626   extend_h_i_d ();
6627   FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
6628     {
6629       rtx insn;
6630 
6631       FOR_BB_INSNS (bb, insn)
6632 	init_h_i_d (insn);
6633     }
6634 }
6635 
6636 /* Finalize haifa_insn_data.  */
6637 void
6638 haifa_finish_h_i_d (void)
6639 {
6640   int i;
6641   haifa_insn_data_t data;
6642   struct reg_use_data *use, *next;
6643 
6644   FOR_EACH_VEC_ELT (haifa_insn_data_def, h_i_d, i, data)
6645     {
6646       free (data->reg_pressure);
6647       for (use = data->reg_use_list; use != NULL; use = next)
6648 	{
6649 	  next = use->next_insn_use;
6650 	  free (use);
6651 	}
6652     }
6653   VEC_free (haifa_insn_data_def, heap, h_i_d);
6654 }
6655 
6656 /* Init data for the new insn INSN.  */
6657 static void
6658 haifa_init_insn (rtx insn)
6659 {
6660   gcc_assert (insn != NULL);
6661 
6662   sched_extend_luids ();
6663   sched_init_insn_luid (insn);
6664   sched_extend_target ();
6665   sched_deps_init (false);
6666   extend_h_i_d ();
6667   init_h_i_d (insn);
6668 
6669   if (adding_bb_to_current_region_p)
6670     {
6671       sd_init_insn (insn);
6672 
6673       /* Extend dependency caches by one element.  */
6674       extend_dependency_caches (1, false);
6675     }
6676   if (sched_pressure_p)
6677     init_insn_reg_pressure_info (insn);
6678 }
6679 
6680 /* Init data for the new basic block BB which comes after AFTER.  */
6681 static void
6682 haifa_init_only_bb (basic_block bb, basic_block after)
6683 {
6684   gcc_assert (bb != NULL);
6685 
6686   sched_init_bbs ();
6687 
6688   if (common_sched_info->add_block)
6689     /* This changes only data structures of the front-end.  */
6690     common_sched_info->add_block (bb, after);
6691 }
6692 
6693 /* A generic version of sched_split_block ().  */
6694 basic_block
6695 sched_split_block_1 (basic_block first_bb, rtx after)
6696 {
6697   edge e;
6698 
6699   e = split_block (first_bb, after);
6700   gcc_assert (e->src == first_bb);
6701 
6702   /* sched_split_block emits a note if *check == BB_END.  Probably it
6703      is better to rip that note off.  */
6704 
6705   return e->dest;
6706 }
6707 
6708 /* A generic version of sched_create_empty_bb ().  */
6709 basic_block
6710 sched_create_empty_bb_1 (basic_block after)
6711 {
6712   return create_empty_bb (after);
6713 }
6714 
6715 /* Insert PAT as an INSN into the schedule and update the necessary data
6716    structures to account for it. */
6717 rtx
6718 sched_emit_insn (rtx pat)
6719 {
6720   rtx insn = emit_insn_before (pat, nonscheduled_insns_begin);
6721   haifa_init_insn (insn);
6722 
6723   if (current_sched_info->add_remove_insn)
6724     current_sched_info->add_remove_insn (insn, 0);
6725 
6726   (*current_sched_info->begin_schedule_ready) (insn);
6727   VEC_safe_push (rtx, heap, scheduled_insns, insn);
6728 
6729   last_scheduled_insn = insn;
6730   return insn;
6731 }
6732 
6733 /* Return a candidate from the ready list that satisfies the dispatch
6734    constraints: prefer the first insn that fits the dispatch window; on
6735    a dispatch violation fall back to the head of the list; otherwise
6736    try the first compare insn, and finally the head of the list.  */
6735 
6736 static rtx
6737 ready_remove_first_dispatch (struct ready_list *ready)
6738 {
6739   int i;
6740   rtx insn = ready_element (ready, 0);
6741 
6742   if (ready->n_ready == 1
6743       || INSN_CODE (insn) < 0
6744       || !INSN_P (insn)
6745       || !active_insn_p (insn)
6746       || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
6747     return ready_remove_first (ready);
6748 
6749   for (i = 1; i < ready->n_ready; i++)
6750     {
6751       insn = ready_element (ready, i);
6752 
6753       if (INSN_CODE (insn) < 0
6754 	  || !INSN_P (insn)
6755 	  || !active_insn_p (insn))
6756 	continue;
6757 
6758       if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
6759 	{
6760 	  /* Return the i-th element of ready.  */
6761 	  insn = ready_remove (ready, i);
6762 	  return insn;
6763 	}
6764     }
6765 
6766   if (targetm.sched.dispatch (NULL_RTX, DISPATCH_VIOLATION))
6767     return ready_remove_first (ready);
6768 
6769   for (i = 1; i < ready->n_ready; i++)
6770     {
6771       insn = ready_element (ready, i);
6772 
6773       if (INSN_CODE (insn) < 0
6774 	  || !INSN_P (insn)
6775 	  || !active_insn_p (insn))
6776 	continue;
6777 
6778       /* Return i-th element of ready.  */
6779       if (targetm.sched.dispatch (insn, IS_CMP))
6780 	return ready_remove (ready, i);
6781     }
6782 
6783   return ready_remove_first (ready);
6784 }
6785 
6786 /* Get the number of ready insns in the ready list.  */
6787 
6788 int
6789 number_in_ready (void)
6790 {
6791   return ready.n_ready;
6792 }
6793 
6794 /* Return the I-th element of the ready list.  */
6795 
6796 rtx
6797 get_ready_element (int i)
6798 {
6799   return ready_element (&ready, i);
6800 }
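
/* A hypothetical debugging sketch built on the two helpers above (the
   function name is invented, and it assumes sched_dump is open, i.e.
   scheduling dumps are enabled): print the UID of every insn that is
   currently in the ready list.  */
#if 0
static void
debug_dump_ready_uids (void)
{
  int i;

  for (i = 0; i < number_in_ready (); i++)
    fprintf (sched_dump, " %d", INSN_UID (get_ready_element (i)));
  fprintf (sched_dump, "\n");
}
#endif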
6801 
6802 #endif /* INSN_SCHEDULING */
6803