/* Instruction scheduling pass.
   Copyright (C) 1992-2019 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for
   the normal instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  This pass must therefore create the remaining
   dependencies we need to observe: register dependencies, memory
   dependencies, dependencies to keep function calls in order, and the
   dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block over interblock motion, ties broken by
   4.  prefer useful over speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, ties broken by
   7.  choose the insn which has the most insns dependent on it, or
   finally
   8.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To a first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using insn backward dependences
   INSN_BACK_DEPS.  INSN_BACK_DEPS are translated into forward dependences
   INSN_FORW_DEPS for the purpose of forward list scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one in which
   to perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live register constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */

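/* For illustration only: a minimal sketch of the list-scheduling loop
   described above.  The helper names (pick_best_ready, wake_dependents,
   etc.) are hypothetical; the real loop lives in schedule_block and
   sched-rgn.c and is considerably more involved.  */
#if 0
static void
list_schedule_sketch (void)
{
  int cycle = 0;
  /* Cycle by cycle: move queued insns whose stall has elapsed into the
     ready list, then issue ready insns in priority order.  */
  while (have_ready_insns () || have_queued_insns ())
    {
      move_queue_to_ready (cycle);		/* Q -> R.  */
      while (have_ready_insns () && can_issue_more (cycle))
	{
	  rtx_insn *insn = pick_best_ready ();	/* Highest priority wins.  */
	  commit_to_schedule (insn, cycle);	/* R -> S.  */
	  wake_dependents (insn, cycle);	/* P -> R or P -> Q.  */
	}
      cycle++;
    }
}
#endif
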
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "cfghooks.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "insn-attr.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "sched-int.h"
#include "common/common-target.h"
#include "params.h"
#include "dbgcnt.h"
#include "cfgloop.h"
#include "dumpfile.h"
#include "print-rtl.h"

#ifdef INSN_SCHEDULING

/* True if we do register pressure relief through live-range
   shrinkage.  */
static bool live_range_shrinkage_p;

/* Switch on live range shrinkage.  */
void
initialize_live_range_shrinkage (void)
{
  live_range_shrinkage_p = true;
}

/* Switch off live range shrinkage.  */
void
finish_live_range_shrinkage (void)
{
  live_range_shrinkage_p = false;
}

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file;
   otherwise we set it to 1.  */

int issue_rate;

/* This can be set to true by a backend if the scheduler should not
   enable a DCE pass.  */
bool sched_no_dce;

/* The current initiation interval used when modulo scheduling.  */
static int modulo_ii;

/* The maximum number of stages we are prepared to handle.  */
static int modulo_max_stages;

/* The number of insns that exist in each iteration of the loop.  We use this
   to detect when we've scheduled all insns from the first iteration.  */
static int modulo_n_insns;

/* The current count of insns in the first iteration of the loop that have
   already been scheduled.  */
static int modulo_insns_scheduled;

/* The maximum uid of insns from the first iteration of the loop.  */
static int modulo_iter0_max_uid;

/* The number of times we should attempt to backtrack when modulo scheduling.
   Decreased each time we have to backtrack.  */
static int modulo_backtracks_left;

/* The stage in which the last insn from the original loop was
   scheduled.  */
static int modulo_last_stage;

/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N=0: no debugging output.
   N=1: default value.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */
int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump.  */
FILE *sched_dump = 0;
216 
217 /* This is a placeholder for the scheduler parameters common
218    to all schedulers.  */
219 struct common_sched_info_def *common_sched_info;
220 
221 #define INSN_TICK(INSN)	(HID (INSN)->tick)
222 #define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
223 #define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
224 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
225 #define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
226 #define SHADOW_P(INSN) (HID (INSN)->shadow_p)
227 #define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
228 /* Cached cost of the instruction.  Use insn_sched_cost to get cost of the
229    insn.  -1 here means that the field is not initialized.  */
230 #define INSN_COST(INSN)	(HID (INSN)->cost)
231 
232 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
233    then it should be recalculated from scratch.  */
234 #define INVALID_TICK (-(max_insn_queue_index + 1))
235 /* The minimal value of the INSN_TICK of an instruction.  */
236 #define MIN_TICK (-max_insn_queue_index)
237 
238 /* Original order of insns in the ready list.
239    Used to keep order of normal insns while separating DEBUG_INSNs.  */
240 #define INSN_RFS_DEBUG_ORIG_ORDER(INSN) (HID (INSN)->rfs_debug_orig_order)
241 
242 /* The deciding reason for INSN's place in the ready list.  */
243 #define INSN_LAST_RFS_WIN(INSN) (HID (INSN)->last_rfs_win)
244 
245 /* List of important notes we must keep around.  This is a pointer to the
246    last element in the list.  */
247 rtx_insn *note_list;
248 
249 static struct spec_info_def spec_info_var;
250 /* Description of the speculative part of the scheduling.
251    If NULL - no speculation.  */
252 spec_info_t spec_info = NULL;
253 
254 /* True, if recovery block was added during scheduling of current block.
255    Used to determine, if we need to fix INSN_TICKs.  */
256 static bool haifa_recovery_bb_recently_added_p;
257 
258 /* True, if recovery block was added during this scheduling pass.
259    Used to determine if we should have empty memory pools of dependencies
260    after finishing current region.  */
261 bool haifa_recovery_bb_ever_added_p;
262 
263 /* Counters of different types of speculative instructions.  */
264 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
265 
266 /* Array used in {unlink, restore}_bb_notes.  */
267 static rtx_insn **bb_header = 0;
268 
269 /* Basic block after which recovery blocks will be created.  */
270 static basic_block before_recovery;
271 
272 /* Basic block just before the EXIT_BLOCK and after recovery, if we have
273    created it.  */
274 basic_block after_recovery;
275 
276 /* FALSE if we add bb to another region, so we don't need to initialize it.  */
277 bool adding_bb_to_current_region_p = true;
278 
279 /* Queues, etc.  */
280 
281 /* An instruction is ready to be scheduled when all insns preceding it
282    have already been scheduled.  It is important to ensure that all
283    insns which use its result will not be executed until its result
284    has been computed.  An insn is maintained in one of four structures:
285 
286    (P) the "Pending" set of insns which cannot be scheduled until
287    their dependencies have been satisfied.
288    (Q) the "Queued" set of insns that can be scheduled when sufficient
289    time has passed.
290    (R) the "Ready" list of unscheduled, uncommitted insns.
291    (S) the "Scheduled" list of insns.
292 
293    Initially, all insns are either "Pending" or "Ready" depending on
294    whether their dependencies are satisfied.
295 
296    Insns move from the "Ready" list to the "Scheduled" list as they
297    are committed to the schedule.  As this occurs, the insns in the
298    "Pending" list have their dependencies satisfied and move to either
299    the "Ready" list or the "Queued" set depending on whether
300    sufficient time has passed to make them ready.  As time passes,
301    insns move from the "Queued" set to the "Ready" list.
302 
303    The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
304    unscheduled insns, i.e., those that are ready, queued, and pending.
305    The "Queued" set (Q) is implemented by the variable `insn_queue'.
306    The "Ready" list (R) is implemented by the variables `ready' and
307    `n_ready'.
308    The "Scheduled" list (S) is the new insn chain built by this pass.
309 
310    The transition (R->S) is implemented in the scheduling loop in
311    `schedule_block' when the best insn to schedule is chosen.
312    The transitions (P->R and P->Q) are implemented in `schedule_insn' as
313    insns move from the ready list to the scheduled list.
314    The transition (Q->R) is implemented in 'queue_to_insn' as time
315    passes or stalls are introduced.  */
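/* A worked example of the transitions above (illustrative only):
   suppose insns B and C both depend on A, with a latency of two
   cycles.  Initially A is in R and B and C are in P.  Scheduling A
   (R->S) at cycle 0 satisfies both dependencies, but the results are
   not ready until cycle 2, so B and C move to Q (P->Q).  Two cycles
   later they move to R (Q->R), where the tie between them is broken
   by rank_for_schedule.  */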

/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal time of instruction execution computed by genattr.c
   on the basis of the maximal time of functional unit reservations and
   of getting a result.  This is the longest time an insn may be queued.  */

static rtx_insn_list **insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)

#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is in neither the
   queue nor the ready list.
   QUEUE_READY     - INSN is in ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */
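/* Example of the wrap-around arithmetic (illustrative, assuming
   max_insn_queue_index == 7, i.e. a power of two minus one):
   NEXT_Q (7) == (7 + 1) & 7 == 0, and NEXT_Q_AFTER (5, 4)
   == (5 + 4) & 7 == 1, so an insn delayed four cycles from slot 5
   lands in slot 1 of the circular buffer.  */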

#define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)

/* The following variable describes all current and future
   reservations of the processor units.  */
state_t curr_state;

/* The following variable is the size of the memory representing all
   current and future reservations of the processor units.  */
size_t dfa_state_size;

/* The following array is used to find the best insn from ready when
   the automaton pipeline interface is used.  */
signed char *ready_try = NULL;

/* The ready list.  */
struct ready_list ready = {NULL, 0, 0, 0, 0};

/* The pointer to the ready list (to be removed).  */
static struct ready_list *readyp = &ready;

/* Scheduling clock.  */
static int clock_var;

/* Clock at which the previous instruction was issued.  */
static int last_clock_var;

/* Set to true if, when queuing a shadow insn, we discover that it would be
   scheduled too late.  */
static bool must_backtrack;

/* The following variable is the number of essential insns issued on
   the current cycle.  An insn is essential if it changes the
   processor's state.  */
int cycle_issued_insns;

/* This records the actual schedule.  It is built up during the main phase
   of schedule_block, and afterwards used to reorder the insns in the RTL.  */
static vec<rtx_insn *> scheduled_insns;

static int may_trap_exp (const_rtx, int);

/* Nonzero iff the address is composed of at most one register.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)					\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))
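/* For example (illustrative), CONST_BASED_ADDRESS_P accepts (reg R),
   (plus (reg R) (const_int 4)) and (lo_sum (reg R) (symbol_ref S)),
   but rejects (plus (reg R1) (reg R2)), whose address needs two
   registers.  */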

/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  */


static int haifa_luid_for_non_insn (rtx x);

/* Haifa version of sched_info hooks common to all headers.  */
const struct common_sched_info_def haifa_common_sched_info =
  {
    NULL, /* fix_recovery_cfg */
    NULL, /* add_block */
    NULL, /* estimate_number_of_insns */
    haifa_luid_for_non_insn, /* luid_for_non_insn */
    SCHED_PASS_UNKNOWN /* sched_pass_id */
  };

/* Mapping from instruction UID to its Logical UID.  */
vec<int> sched_luids;

/* Next LUID to assign to an instruction.  */
int sched_max_luid = 1;

/* Haifa Instruction Data.  */
vec<haifa_insn_data_def> h_i_d;

void (* sched_init_only_bb) (basic_block, basic_block);

/* Split block function.  Different schedulers might use different
   functions to keep their internal data consistent.  */
basic_block (* sched_split_block) (basic_block, rtx);

/* Create empty basic block after the specified block.  */
basic_block (* sched_create_empty_bb) (basic_block);

/* Return the number of cycles until INSN is expected to be ready.
   Return zero if it already is.  */
static int
insn_delay (rtx_insn *insn)
{
  return MAX (INSN_TICK (insn) - clock_var, 0);
}

static int
may_trap_exp (const_rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
	return TRAP_RISKY;
      else
	return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory:  a volatile load.  */
      if (MEM_VOLATILE_P (x))
	return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
	return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
	return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
	return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    {
	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
	      insn_class = WORST_CLASS (insn_class, tmp_class);
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = 0; j < XVECLEN (x, i); j++)
		{
		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
		  insn_class = WORST_CLASS (insn_class, tmp_class);
		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
		    break;
		}
	    }
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
      return insn_class;
    }
}

/* Classifies rtx X of an insn for the purpose of verifying that X can be
   executed speculatively (and consequently the insn can be moved
   speculatively), by examining X, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
   being either PFREE or PRISKY.  */

static int
haifa_classify_rtx (const_rtx x)
{
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (x) == PARALLEL)
    {
      int i, len = XVECLEN (x, 0);

      for (i = len - 1; i >= 0; i--)
	{
	  tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
	  insn_class = WORST_CLASS (insn_class, tmp_class);
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
    }
  else
    {
      code = GET_CODE (x);
      switch (code)
	{
	case CLOBBER:
	  /* Test if it is a 'store'.  */
	  tmp_class = may_trap_exp (XEXP (x, 0), 1);
	  break;
	case CLOBBER_HIGH:
	  gcc_assert (REG_P (XEXP (x, 0)));
	  break;
	case SET:
	  /* Test if it is a store.  */
	  tmp_class = may_trap_exp (SET_DEST (x), 1);
	  if (tmp_class == TRAP_RISKY)
	    break;
	  /* Test if it is a load.  */
	  tmp_class =
	    WORST_CLASS (tmp_class,
			 may_trap_exp (SET_SRC (x), 0));
	  break;
	case COND_EXEC:
	  tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
	  if (tmp_class == TRAP_RISKY)
	    break;
	  tmp_class = WORST_CLASS (tmp_class,
				   may_trap_exp (COND_EXEC_TEST (x), 0));
	  break;
	case TRAP_IF:
	  tmp_class = TRAP_RISKY;
	  break;
	default:;
	}
      insn_class = tmp_class;
    }

  return insn_class;
}

int
haifa_classify_insn (const_rtx insn)
{
  return haifa_classify_rtx (PATTERN (insn));
}

/* After the scheduler initialization function has been called, this function
   can be called to enable modulo scheduling.  II is the initiation interval
   we should use; it affects the delays for delay_pairs that were recorded as
   separated by a given number of stages.

   MAX_STAGES provides us with a limit
   after which we give up scheduling; the caller must have unrolled at least
   as many copies of the loop body and recorded delay_pairs for them.

   INSNS is the number of real (non-debug) insns in one iteration of
   the loop.  MAX_UID can be used to test whether an insn belongs to
   the first iteration of the loop; all of them have a uid lower than
   MAX_UID.  */
void
set_modulo_params (int ii, int max_stages, int insns, int max_uid)
{
  modulo_ii = ii;
  modulo_max_stages = max_stages;
  modulo_n_insns = insns;
  modulo_iter0_max_uid = max_uid;
  modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS);
}
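
/* Illustrative sketch only (not part of this file's interface): a
   port's software-pipelining driver would enable the machinery along
   these lines, where N_STAGES, LOOP_INSNS and ITER0_MAX_UID are
   hypothetical values it computed while unrolling.  */
#if 0
  /* Unroll the loop body N_STAGES times and record delay pairs for
     the copies, then announce the parameters.  */
  set_modulo_params (ii,		/* initiation interval */
		     n_stages,		/* give up beyond this many stages */
		     loop_insns,	/* real insns per iteration */
		     iter0_max_uid);	/* iteration-0 insns have lower UIDs */
#endif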

/* A structure to record a pair of insns where the first one is a real
   insn that has delay slots, and the second is its delayed shadow.
   I1 is scheduled normally and will emit an assembly instruction,
   while I2 describes the side effect that takes place at the
   transition between cycles CYCLES and (CYCLES + 1) after I1.  */
struct delay_pair
{
  struct delay_pair *next_same_i1;
  rtx_insn *i1, *i2;
  int cycles;
  /* When doing modulo scheduling, a delay_pair can also be used to
     show that I1 and I2 are the same insn in a different stage.  If
     that is the case, STAGES will be nonzero.  */
  int stages;
};

/* Helpers for delay hashing.  */

struct delay_i1_hasher : nofree_ptr_hash <delay_pair>
{
  typedef void *compare_type;
  static inline hashval_t hash (const delay_pair *);
  static inline bool equal (const delay_pair *, const void *);
};

/* Returns a hash value for X, based on hashing just I1.  */

inline hashval_t
delay_i1_hasher::hash (const delay_pair *x)
{
  return htab_hash_pointer (x->i1);
}

/* Return true if I1 of pair X is the same as that of pair Y.  */

inline bool
delay_i1_hasher::equal (const delay_pair *x, const void *y)
{
  return x->i1 == y;
}

struct delay_i2_hasher : free_ptr_hash <delay_pair>
{
  typedef void *compare_type;
  static inline hashval_t hash (const delay_pair *);
  static inline bool equal (const delay_pair *, const void *);
};

/* Returns a hash value for X, based on hashing just I2.  */

inline hashval_t
delay_i2_hasher::hash (const delay_pair *x)
{
  return htab_hash_pointer (x->i2);
}

/* Return true if I2 of pair X is the same as that of pair Y.  */

inline bool
delay_i2_hasher::equal (const delay_pair *x, const void *y)
{
  return x->i2 == y;
}

/* Two hash tables to record delay_pairs, one indexed by I1 and the other
   indexed by I2.  */
static hash_table<delay_i1_hasher> *delay_htab;
static hash_table<delay_i2_hasher> *delay_htab_i2;

/* Called through htab_traverse.  Walk the hashtable using I2 as
   index, and delete all elements involving a UID higher than
   that pointed to by *DATA.  */
int
haifa_htab_i2_traverse (delay_pair **slot, int *data)
{
  int maxuid = *data;
  struct delay_pair *p = *slot;
  if (INSN_UID (p->i2) >= maxuid || INSN_UID (p->i1) >= maxuid)
    {
      delay_htab_i2->clear_slot (slot);
    }
  return 1;
}

/* Called through htab_traverse.  Walk the hashtable using I1 as
   index, and delete all elements involving a UID higher than
   that pointed to by *DATA.  */
int
haifa_htab_i1_traverse (delay_pair **pslot, int *data)
{
  int maxuid = *data;
  struct delay_pair *p, *first, **pprev;

  if (INSN_UID ((*pslot)->i1) >= maxuid)
    {
      delay_htab->clear_slot (pslot);
      return 1;
    }
  pprev = &first;
  for (p = *pslot; p; p = p->next_same_i1)
    {
      if (INSN_UID (p->i2) < maxuid)
	{
	  *pprev = p;
	  pprev = &p->next_same_i1;
	}
    }
  *pprev = NULL;
  if (first == NULL)
    delay_htab->clear_slot (pslot);
  else
    *pslot = first;
  return 1;
}

/* Discard all delay pairs which involve an insn with a UID higher
   than MAX_UID.  */
void
discard_delay_pairs_above (int max_uid)
{
  delay_htab->traverse <int *, haifa_htab_i1_traverse> (&max_uid);
  delay_htab_i2->traverse <int *, haifa_htab_i2_traverse> (&max_uid);
}

/* This function can be called by a port just before it starts the final
   scheduling pass.  It records the fact that an instruction with delay
   slots has been split into two insns, I1 and I2.  The first one will be
   scheduled normally and initiates the operation.  The second one is a
   shadow which must follow a specific number of cycles after I1; its only
   purpose is to show the side effect that occurs at that cycle in the RTL.
   If a JUMP_INSN or a CALL_INSN has been split, I1 should be a normal INSN,
   while I2 retains the original insn type.

   There are two ways in which the number of cycles can be specified,
   involving the CYCLES and STAGES arguments to this function.  If STAGES
   is zero, we just use the value of CYCLES.  Otherwise, STAGES is a factor
   which is multiplied by MODULO_II to give the number of cycles.  This is
   only useful if the caller also calls set_modulo_params to enable modulo
   scheduling.  */

void
record_delay_slot_pair (rtx_insn *i1, rtx_insn *i2, int cycles, int stages)
{
  struct delay_pair *p = XNEW (struct delay_pair);
  struct delay_pair **slot;

  p->i1 = i1;
  p->i2 = i2;
  p->cycles = cycles;
  p->stages = stages;

  if (!delay_htab)
    {
      delay_htab = new hash_table<delay_i1_hasher> (10);
      delay_htab_i2 = new hash_table<delay_i2_hasher> (10);
    }
  slot = delay_htab->find_slot_with_hash (i1, htab_hash_pointer (i1), INSERT);
  p->next_same_i1 = *slot;
  *slot = p;
  slot = delay_htab_i2->find_slot (p, INSERT);
  *slot = p;
}
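
/* Illustrative sketch only: a port that split a two-cycle delayed-effect
   instruction into a real insn I1 and its shadow I2 (both hypothetical
   here) would pair them like this before the final scheduling pass.  */
#if 0
  /* STAGES is zero outside modulo scheduling, so the shadow simply
     follows two cycles after I1 issues.  */
  record_delay_slot_pair (i1, i2, /*cycles=*/2, /*stages=*/0);
#endif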

/* Examine the delay pair hashtable to see if INSN is a shadow for another,
   and return the other insn if so.  Return NULL otherwise.  */
rtx_insn *
real_insn_for_shadow (rtx_insn *insn)
{
  struct delay_pair *pair;

  if (!delay_htab)
    return NULL;

  pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
  if (!pair || pair->stages > 0)
    return NULL;
  return pair->i1;
}

/* For a pair P of insns, return the fixed distance in cycles from the first
   insn after which the second must be scheduled.  */
static int
pair_delay (struct delay_pair *p)
{
  if (p->stages == 0)
    return p->cycles;
  else
    return p->stages * modulo_ii;
}

/* Given an insn INSN, add a dependence on its delayed shadow if it
   has one.  Also try to find situations where shadows depend on each other
   and add dependencies to the real insns to limit the amount of backtracking
   needed.  */
void
add_delay_dependencies (rtx_insn *insn)
{
  struct delay_pair *pair;
  sd_iterator_def sd_it;
  dep_t dep;

  if (!delay_htab)
    return;

  pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
  if (!pair)
    return;
  add_dependence (insn, pair->i1, REG_DEP_ANTI);
  if (pair->stages)
    return;

  FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      struct delay_pair *other_pair
	= delay_htab_i2->find_with_hash (pro, htab_hash_pointer (pro));
      if (!other_pair || other_pair->stages)
	continue;
      if (pair_delay (other_pair) >= pair_delay (pair))
	{
	  if (sched_verbose >= 4)
	    {
	      fprintf (sched_dump, ";;\tadding dependence %d <- %d\n",
		       INSN_UID (other_pair->i1),
		       INSN_UID (pair->i1));
	      fprintf (sched_dump, ";;\tpair1 %d <- %d, cost %d\n",
		       INSN_UID (pair->i1),
		       INSN_UID (pair->i2),
		       pair_delay (pair));
	      fprintf (sched_dump, ";;\tpair2 %d <- %d, cost %d\n",
		       INSN_UID (other_pair->i1),
		       INSN_UID (other_pair->i2),
		       pair_delay (other_pair));
	    }
	  add_dependence (pair->i1, other_pair->i1, REG_DEP_ANTI);
	}
    }
}

/* Forward declarations.  */

static int priority (rtx_insn *, bool force_recompute = false);
static int autopref_rank_for_schedule (const rtx_insn *, const rtx_insn *);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx_insn **, int);
static void queue_insn (rtx_insn *, int, const char *);
static int schedule_insn (rtx_insn *);
static void adjust_priority (rtx_insn *);
static void advance_one_cycle (void);
static void extend_h_i_d (void);


/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between two types of notes:

   (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (2) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */

static void ready_add (struct ready_list *, rtx_insn *, bool);
static rtx_insn *ready_remove_first (struct ready_list *);
static rtx_insn *ready_remove_first_dispatch (struct ready_list *ready);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx_insn *ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx_insn *);

static void fix_inter_tick (rtx_insn *, rtx_insn *);
static int fix_tick_ready (rtx_insn *);
static void change_queue_index (rtx_insn *, int);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void init_h_i_d (rtx_insn *);
static int haifa_speculate_insn (rtx_insn *, ds_t, rtx *);
static void generate_recovery_code (rtx_insn *);
static void process_insn_forw_deps_be_in_spec (rtx_insn *, rtx_insn *, ds_t);
static void begin_speculative_block (rtx_insn *);
static void add_to_speculative_block (rtx_insn *);
static void init_before_recovery (basic_block *);
static void create_check_block_twin (rtx_insn *, bool);
static void fix_recovery_deps (basic_block);
static bool haifa_change_pattern (rtx_insn *, rtx);
static void dump_new_block_header (int, basic_block, rtx_insn *, rtx_insn *);
static void restore_bb_notes (basic_block);
static void fix_jump_move (rtx_insn *);
static void move_block_after_check (rtx_insn *);
static void move_succs (vec<edge, va_gc> **, basic_block);
static void sched_remove_insn (rtx_insn *);
static void clear_priorities (rtx_insn *, rtx_vec_t *);
static void calc_priorities (rtx_vec_t);
static void add_jump_dependencies (rtx_insn *, rtx_insn *);

#endif /* INSN_SCHEDULING */

/* Point to state used for the current scheduling pass.  */
struct haifa_sched_info *current_sched_info;

#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else

/* Do register pressure sensitive insn scheduling if the flag is set.  */
enum sched_pressure_algorithm sched_pressure;

/* Map regno -> its pressure class.  The map is defined only when
   SCHED_PRESSURE != SCHED_PRESSURE_NONE.  */
enum reg_class *sched_regno_pressure_class;

/* The current register pressure.  Only the elements corresponding to
   pressure classes are defined.  */
static int curr_reg_pressure[N_REG_CLASSES];

/* Saved value of curr_reg_pressure.  */
static int saved_reg_pressure[N_REG_CLASSES];

/* Registers live at the given scheduling point.  */
static bitmap curr_reg_live;

/* Saved value of curr_reg_live.  */
static bitmap saved_reg_live;

/* Registers mentioned in the current region.  */
static bitmap region_ref_regs;

/* Temporary bitmap used for SCHED_PRESSURE_MODEL.  */
static bitmap tmp_bitmap;

/* Effective number of available registers of a given class (see comment
   in sched_pressure_start_bb).  */
static int sched_class_regs_num[N_REG_CLASSES];
/* Number of call_saved_regs and fixed_regs.  Helpers for calculating
   sched_class_regs_num.  */
static int call_saved_regs_num[N_REG_CLASSES];
static int fixed_regs_num[N_REG_CLASSES];

/* Initiate register-pressure-related info for scheduling the current
   region.  Currently this only clears the registers mentioned in the
   current region.  */
void
sched_init_region_reg_pressure_info (void)
{
  bitmap_clear (region_ref_regs);
}

/* PRESSURE[CL] describes the pressure on register class CL.  Update it
   for the birth (if BIRTH_P) or death (if !BIRTH_P) of register REGNO.
   LIVE tracks the set of live registers; if it is null, assume that
   every birth or death is genuine.  */
static inline void
mark_regno_birth_or_death (bitmap live, int *pressure, int regno, bool birth_p)
{
  enum reg_class pressure_class;

  pressure_class = sched_regno_pressure_class[regno];
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (pressure_class != NO_REGS)
	{
	  if (birth_p)
	    {
	      if (!live || bitmap_set_bit (live, regno))
		pressure[pressure_class]
		  += (ira_reg_class_max_nregs
		      [pressure_class][PSEUDO_REGNO_MODE (regno)]);
	    }
	  else
	    {
	      if (!live || bitmap_clear_bit (live, regno))
		pressure[pressure_class]
		  -= (ira_reg_class_max_nregs
		      [pressure_class][PSEUDO_REGNO_MODE (regno)]);
	    }
	}
    }
  else if (pressure_class != NO_REGS
	   && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
    {
      if (birth_p)
	{
	  if (!live || bitmap_set_bit (live, regno))
	    pressure[pressure_class]++;
	}
      else
	{
	  if (!live || bitmap_clear_bit (live, regno))
	    pressure[pressure_class]--;
	}
    }
}

/* Initiate current register pressure related info from living
   registers given by LIVE.  */
static void
initiate_reg_pressure_info (bitmap live)
{
  int i;
  unsigned int j;
  bitmap_iterator bi;

  for (i = 0; i < ira_pressure_classes_num; i++)
    curr_reg_pressure[ira_pressure_classes[i]] = 0;
  bitmap_clear (curr_reg_live);
  EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
    if (sched_pressure == SCHED_PRESSURE_MODEL
	|| current_nr_blocks == 1
	|| bitmap_bit_p (region_ref_regs, j))
      mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure, j, true);
}

/* Mark registers in X as mentioned in the current region.  */
static void
setup_ref_regs (rtx x)
{
  int i, j;
  const RTX_CODE code = GET_CODE (x);
  const char *fmt;

  if (REG_P (x))
    {
      bitmap_set_range (region_ref_regs, REGNO (x), REG_NREGS (x));
      return;
    }
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      setup_ref_regs (XEXP (x, i));
    else if (fmt[i] == 'E')
      {
	for (j = 0; j < XVECLEN (x, i); j++)
	  setup_ref_regs (XVECEXP (x, i, j));
      }
}

/* Initiate current register pressure related info at the start of
   basic block BB.  */
static void
initiate_bb_reg_pressure_info (basic_block bb)
{
  unsigned int i ATTRIBUTE_UNUSED;
  rtx_insn *insn;

  if (current_nr_blocks > 1)
    FOR_BB_INSNS (bb, insn)
      if (NONDEBUG_INSN_P (insn))
	setup_ref_regs (PATTERN (insn));
  initiate_reg_pressure_info (df_get_live_in (bb));
  if (bb_has_eh_pred (bb))
    for (i = 0; ; ++i)
      {
	unsigned int regno = EH_RETURN_DATA_REGNO (i);

	if (regno == INVALID_REGNUM)
	  break;
	if (! bitmap_bit_p (df_get_live_in (bb), regno))
	  mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
				     regno, true);
      }
}

/* Save current register pressure related info.  */
static void
save_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_pressure_classes_num; i++)
    saved_reg_pressure[ira_pressure_classes[i]]
      = curr_reg_pressure[ira_pressure_classes[i]];
  bitmap_copy (saved_reg_live, curr_reg_live);
}

/* Restore saved register pressure related info.  */
static void
restore_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_pressure_classes_num; i++)
    curr_reg_pressure[ira_pressure_classes[i]]
      = saved_reg_pressure[ira_pressure_classes[i]];
  bitmap_copy (curr_reg_live, saved_reg_live);
}

/* Return TRUE if the register is dying after its USE.  */
static bool
dying_use_p (struct reg_use_data *use)
{
  struct reg_use_data *next;

  for (next = use->next_regno_use; next != use; next = next->next_regno_use)
    if (NONDEBUG_INSN_P (next->insn)
	&& QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
      return false;
  return true;
}

/* Print info about the current register pressure and its excess for
   each pressure class.  */
static void
print_curr_reg_pressure (void)
{
  int i;
  enum reg_class cl;

  fprintf (sched_dump, ";;\t");
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      gcc_assert (curr_reg_pressure[cl] >= 0);
      fprintf (sched_dump, "  %s:%d(%d)", reg_class_names[cl],
	       curr_reg_pressure[cl],
	       curr_reg_pressure[cl] - sched_class_regs_num[cl]);
    }
  fprintf (sched_dump, "\n");
}
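
/* For example (illustrative values): with a pressure of 12 on
   GENERAL_REGS against 9 effective registers, the loop above would
   print "  GENERAL_REGS:12(3)", i.e. pressure 12 with an excess of 3
   over sched_class_regs_num.  */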

/* Determine if INSN has a condition that is clobbered if a register
   in SET_REGS is modified.  */
static bool
cond_clobbered_p (rtx_insn *insn, HARD_REG_SET set_regs)
{
  rtx pat = PATTERN (insn);
  gcc_assert (GET_CODE (pat) == COND_EXEC);
  if (TEST_HARD_REG_BIT (set_regs, REGNO (XEXP (COND_EXEC_TEST (pat), 0))))
    {
      sd_iterator_def sd_it;
      dep_t dep;
      haifa_change_pattern (insn, ORIG_PAT (insn));
      FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
	DEP_STATUS (dep) &= ~DEP_CANCELLED;
      TODO_SPEC (insn) = HARD_DEP;
      if (sched_verbose >= 2)
	fprintf (sched_dump,
		 ";;\t\tdequeue insn %s because of clobbered condition\n",
		 (*current_sched_info->print_insn) (insn, 0));
      return true;
    }

  return false;
}

/* This function should be called after modifying the pattern of INSN,
   to update scheduler data structures as needed.  */
static void
update_insn_after_change (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  dfa_clear_single_insn_cache (insn);

  sd_it = sd_iterator_start (insn,
			     SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK);
  while (sd_iterator_cond (&sd_it, &dep))
    {
      DEP_COST (dep) = UNKNOWN_DEP_COST;
      sd_iterator_next (&sd_it);
    }

  /* Invalidate INSN_COST, so it'll be recalculated.  */
  INSN_COST (insn) = -1;
  /* Invalidate INSN_TICK, so it'll be recalculated.  */
  INSN_TICK (insn) = INVALID_TICK;

  /* Invalidate autoprefetch data entry.  */
  INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status
    = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
  INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status
    = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
}


/* Two VECs, one to hold dependencies for which pattern replacements
   need to be applied or restored at the start of the next cycle, and
   another to hold an integer that is either one, to apply the
   corresponding replacement, or zero to restore it.  */
static vec<dep_t> next_cycle_replace_deps;
static vec<int> next_cycle_apply;

static void apply_replacement (dep_t, bool);
static void restore_pattern (dep_t, bool);

/* Look at the remaining dependencies for insn NEXT, and compute and return
   the TODO_SPEC value we should use for it.  This is called after one of
   NEXT's dependencies has been resolved.
   We also perform pattern replacements for predication, and for broken
   replacement dependencies.  The latter is only done if FOR_BACKTRACK is
   false.  */

static ds_t
recompute_todo_spec (rtx_insn *next, bool for_backtrack)
{
  ds_t new_ds;
  sd_iterator_def sd_it;
  dep_t dep, modify_dep = NULL;
  int n_spec = 0;
  int n_control = 0;
  int n_replace = 0;
  bool first_p = true;

  if (sd_lists_empty_p (next, SD_LIST_BACK))
    /* NEXT has all its dependencies resolved.  */
    return 0;

  if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
    return HARD_DEP;

  /* If NEXT is intended to sit adjacent to this instruction, we don't
     want to try to break any dependencies.  Treat it as a HARD_DEP.  */
  if (SCHED_GROUP_P (next))
    return HARD_DEP;

  /* Now we've got NEXT with speculative deps only.
     1. Look at the deps to see what we have to do.
     2. Check if we can do 'todo'.  */
  new_ds = 0;

  FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      ds_t ds = DEP_STATUS (dep) & SPECULATIVE;

      if (DEBUG_INSN_P (pro) && !DEBUG_INSN_P (next))
	continue;

      if (ds)
	{
	  n_spec++;
	  if (first_p)
	    {
	      first_p = false;

	      new_ds = ds;
	    }
	  else
	    new_ds = ds_merge (new_ds, ds);
	}
      else if (DEP_TYPE (dep) == REG_DEP_CONTROL)
	{
	  if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
	    {
	      n_control++;
	      modify_dep = dep;
	    }
	  DEP_STATUS (dep) &= ~DEP_CANCELLED;
	}
      else if (DEP_REPLACE (dep) != NULL)
	{
	  if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
	    {
	      n_replace++;
	      modify_dep = dep;
	    }
	  DEP_STATUS (dep) &= ~DEP_CANCELLED;
	}
    }

  if (n_replace > 0 && n_control == 0 && n_spec == 0)
    {
      if (!dbg_cnt (sched_breakdep))
	return HARD_DEP;
      FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
	{
	  struct dep_replacement *desc = DEP_REPLACE (dep);
	  if (desc != NULL)
	    {
	      if (desc->insn == next && !for_backtrack)
		{
		  gcc_assert (n_replace == 1);
		  apply_replacement (dep, true);
		}
	      DEP_STATUS (dep) |= DEP_CANCELLED;
	    }
	}
      return 0;
    }

  else if (n_control == 1 && n_replace == 0 && n_spec == 0)
    {
      rtx_insn *pro, *other;
      rtx new_pat;
      rtx cond = NULL_RTX;
      bool success;
      rtx_insn *prev = NULL;
      int i;
      unsigned regno;

      if ((current_sched_info->flags & DO_PREDICATION) == 0
	  || (ORIG_PAT (next) != NULL_RTX
	      && PREDICATED_PAT (next) == NULL_RTX))
	return HARD_DEP;

      pro = DEP_PRO (modify_dep);
      other = real_insn_for_shadow (pro);
      if (other != NULL_RTX)
	pro = other;

      cond = sched_get_reverse_condition_uncached (pro);
      regno = REGNO (XEXP (cond, 0));

      /* Find the last scheduled insn that modifies the condition register.
	 We can stop looking once we find the insn we depend on through the
	 REG_DEP_CONTROL; if the condition register isn't modified after it,
	 we know that it still has the right value.  */
      if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
	FOR_EACH_VEC_ELT_REVERSE (scheduled_insns, i, prev)
	  {
	    HARD_REG_SET t;

	    find_all_hard_reg_sets (prev, &t, true);
	    if (TEST_HARD_REG_BIT (t, regno))
	      return HARD_DEP;
	    if (prev == pro)
	      break;
	  }
      if (ORIG_PAT (next) == NULL_RTX)
	{
	  ORIG_PAT (next) = PATTERN (next);

	  new_pat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (next));
	  success = haifa_change_pattern (next, new_pat);
	  if (!success)
	    return HARD_DEP;
	  PREDICATED_PAT (next) = new_pat;
	}
      else if (PATTERN (next) != PREDICATED_PAT (next))
	{
	  bool success = haifa_change_pattern (next,
					       PREDICATED_PAT (next));
	  gcc_assert (success);
	}
      DEP_STATUS (modify_dep) |= DEP_CANCELLED;
      return DEP_CONTROL;
    }

  if (PREDICATED_PAT (next) != NULL_RTX)
    {
      int tick = INSN_TICK (next);
      bool success = haifa_change_pattern (next,
					   ORIG_PAT (next));
      INSN_TICK (next) = tick;
      gcc_assert (success);
    }

  /* We can't handle the case where there are both speculative and control
     dependencies, so we return HARD_DEP in such a case.  Also fail if
     we have speculative dependencies with not enough points, or more than
     one control dependency.  */
  if ((n_spec > 0 && (n_control > 0 || n_replace > 0))
      || (n_spec > 0
	  /* Too few points?  */
	  && ds_weak (new_ds) < spec_info->data_weakness_cutoff)
      || n_control > 0
      || n_replace > 0)
    return HARD_DEP;

  return new_ds;
}

/* Pointer to the last instruction scheduled.  */
static rtx_insn *last_scheduled_insn;

/* Pointer to the last nondebug instruction scheduled within the
   block, or the prev_head of the scheduling block.  Used by
   rank_for_schedule, so that insns independent of the last scheduled
   insn will be preferred over dependent instructions.  */
static rtx_insn *last_nondebug_scheduled_insn;

/* Pointer that iterates through the list of unscheduled insns if we
   have a dbg_cnt enabled.  It always points at an insn prior to the
   first unscheduled one.  */
static rtx_insn *nonscheduled_insns_begin;

/* Compute cost of executing INSN.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
insn_sched_cost (rtx_insn *insn)
{
  int cost;

  if (sched_fusion)
    return 0;

  if (sel_sched_p ())
    {
      if (recog_memoized (insn) < 0)
	return 0;

      cost = insn_default_latency (insn);
      if (cost < 0)
	cost = 0;

      return cost;
    }

  cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
	 understand.  We can't pass these directly to
	 result_ready_cost or insn_default_latency because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
	{
	  INSN_COST (insn) = 0;
	  return 0;
	}
      else
	{
	  cost = insn_default_latency (insn);
	  if (cost < 0)
	    cost = 0;

	  INSN_COST (insn) = cost;
	}
    }

  return cost;
}

/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.
   ??? We also use this function to call recog_memoized on all insns.  */
int
dep_cost_1 (dep_t link, dw_t dw)
{
  rtx_insn *insn = DEP_PRO (link);
  rtx_insn *used = DEP_CON (link);
  int cost;

  if (DEP_COST (link) != UNKNOWN_DEP_COST)
    return DEP_COST (link);

  if (delay_htab)
    {
      struct delay_pair *delay_entry;
      delay_entry
	= delay_htab_i2->find_with_hash (used, htab_hash_pointer (used));
      if (delay_entry)
	{
	  if (delay_entry->i1 == insn)
	    {
	      DEP_COST (link) = pair_delay (delay_entry);
	      return DEP_COST (link);
	    }
	}
    }

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  We don't care about the
     dependence cost when only decreasing register pressure.  */
  if (recog_memoized (used) < 0)
    {
      cost = 0;
      recog_memoized (insn);
    }
  else
    {
      enum reg_note dep_type = DEP_TYPE (link);

      cost = insn_sched_cost (insn);

      if (INSN_CODE (insn) >= 0)
	{
	  if (dep_type == REG_DEP_ANTI)
	    cost = 0;
	  else if (dep_type == REG_DEP_OUTPUT)
	    {
	      cost = (insn_default_latency (insn)
		      - insn_default_latency (used));
	      if (cost <= 0)
		cost = 1;
	    }
	  else if (bypass_p (insn))
	    cost = insn_latency (insn, used);
	}


      if (targetm.sched.adjust_cost)
	cost = targetm.sched.adjust_cost (used, (int) dep_type, insn, cost,
					  dw);

      if (cost < 0)
	cost = 0;
    }

  DEP_COST (link) = cost;
  return cost;
}

/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
dep_cost (dep_t link)
{
  return dep_cost_1 (link, 0);
}
1514 
1515 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
1516    INSN_PRIORITY explicitly.  */
1517 void
increase_insn_priority(rtx_insn * insn,int amount)1518 increase_insn_priority (rtx_insn *insn, int amount)
1519 {
1520   if (!sel_sched_p ())
1521     {
1522       /* We're dealing with haifa-sched.c INSN_PRIORITY.  */
1523       if (INSN_PRIORITY_KNOWN (insn))
1524 	  INSN_PRIORITY (insn) += amount;
1525     }
1526   else
1527     {
1528       /* In sel-sched.c INSN_PRIORITY is not kept up to date.
1529 	 Use EXPR_PRIORITY instead. */
1530       sel_add_to_insn_priority (insn, amount);
1531     }
1532 }
1533 
1534 /* Return 'true' if DEP should be included in priority calculations.  */
1535 static bool
contributes_to_priority_p(dep_t dep)1536 contributes_to_priority_p (dep_t dep)
1537 {
1538   if (DEBUG_INSN_P (DEP_CON (dep))
1539       || DEBUG_INSN_P (DEP_PRO (dep)))
1540     return false;
1541 
1542   /* Critical path is meaningful in block boundaries only.  */
1543   if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
1544 						    DEP_PRO (dep)))
1545     return false;
1546 
1547   if (DEP_REPLACE (dep) != NULL)
1548     return false;
1549 
1550   /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
1551      then speculative instructions will less likely be
1552      scheduled.  That is because the priority of
1553      their producers will increase, and, thus, the
1554      producers will more likely be scheduled, thus,
1555      resolving the dependence.  */
1556   if (sched_deps_info->generate_spec_deps
1557       && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
1558       && (DEP_STATUS (dep) & SPECULATIVE))
1559     return false;
1560 
1561   return true;
1562 }
1563 
1564 /* Compute the number of nondebug deps in list LIST for INSN.  */
1565 
1566 static int
1567 dep_list_size (rtx_insn *insn, sd_list_types_def list)
1568 {
1569   sd_iterator_def sd_it;
1570   dep_t dep;
1571   int dbgcount = 0, nodbgcount = 0;
1572 
1573   if (!MAY_HAVE_DEBUG_INSNS)
1574     return sd_lists_size (insn, list);
1575 
1576   FOR_EACH_DEP (insn, list, sd_it, dep)
1577     {
1578       if (DEBUG_INSN_P (DEP_CON (dep)))
1579 	dbgcount++;
1580       else if (!DEBUG_INSN_P (DEP_PRO (dep)))
1581 	nodbgcount++;
1582     }
1583 
1584   gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, list));
1585 
1586   return nodbgcount;
1587 }
1588 
1589 bool sched_fusion;
1590 
1591 /* Compute the priority number for INSN.  */
1592 static int
1593 priority (rtx_insn *insn, bool force_recompute)
1594 {
1595   if (! INSN_P (insn))
1596     return 0;
1597 
1598   /* We should not be interested in the priority of an already-scheduled insn.  */
1599   gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
1600 
1601   if (force_recompute || !INSN_PRIORITY_KNOWN (insn))
1602     {
1603       int this_priority = -1;
1604 
1605       if (sched_fusion)
1606 	{
1607 	  int this_fusion_priority;
1608 
1609 	  targetm.sched.fusion_priority (insn, FUSION_MAX_PRIORITY,
1610 					 &this_fusion_priority, &this_priority);
1611 	  INSN_FUSION_PRIORITY (insn) = this_fusion_priority;
1612 	}
1613       else if (dep_list_size (insn, SD_LIST_FORW) == 0)
1614 	/* ??? We should set INSN_PRIORITY to insn_sched_cost when an insn
1615 	   has some forward deps but all of them are ignored by the
1616 	   contributes_to_priority hook.  At the moment we set the priority
1617 	   of such an insn to 0.  */
1618 	this_priority = insn_sched_cost (insn);
1619       else
1620 	{
1621 	  rtx_insn *prev_first, *twin;
1622 	  basic_block rec;
1623 
1624 	  /* For recovery check instructions we calculate priority slightly
1625 	     differently from normal instructions.  Instead of walking
1626 	     through the INSN_FORW_DEPS (check) list, we walk through the
1627 	     INSN_FORW_DEPS list of each instruction in the corresponding
1628 	     recovery block.  */
1629 
1630           /* Selective scheduling does not define RECOVERY_BLOCK macro.  */
1631 	  rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
1632 	  if (!rec || rec == EXIT_BLOCK_PTR_FOR_FN (cfun))
1633 	    {
1634 	      prev_first = PREV_INSN (insn);
1635 	      twin = insn;
1636 	    }
1637 	  else
1638 	    {
1639 	      prev_first = NEXT_INSN (BB_HEAD (rec));
1640 	      twin = PREV_INSN (BB_END (rec));
1641 	    }
1642 
1643 	  do
1644 	    {
1645 	      sd_iterator_def sd_it;
1646 	      dep_t dep;
1647 
1648 	      FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
1649 		{
1650 		  rtx_insn *next;
1651 		  int next_priority;
1652 
1653 		  next = DEP_CON (dep);
1654 
1655 		  if (BLOCK_FOR_INSN (next) != rec)
1656 		    {
1657 		      int cost;
1658 
1659 		      if (!contributes_to_priority_p (dep))
1660 			continue;
1661 
1662 		      if (twin == insn)
1663 			cost = dep_cost (dep);
1664 		      else
1665 			{
1666 			  struct _dep _dep1, *dep1 = &_dep1;
1667 
1668 			  init_dep (dep1, insn, next, REG_DEP_ANTI);
1669 
1670 			  cost = dep_cost (dep1);
1671 			}
1672 
1673 		      next_priority = cost + priority (next);
1674 
1675 		      if (next_priority > this_priority)
1676 			this_priority = next_priority;
1677 		    }
1678 		}
1679 
1680 	      twin = PREV_INSN (twin);
1681 	    }
1682 	  while (twin != prev_first);
1683 	}
1684 
1685       if (this_priority < 0)
1686 	{
1687 	  gcc_assert (this_priority == -1);
1688 
1689 	  this_priority = insn_sched_cost (insn);
1690 	}
1691 
1692       INSN_PRIORITY (insn) = this_priority;
1693       INSN_PRIORITY_STATUS (insn) = 1;
1694     }
1695 
1696   return INSN_PRIORITY (insn);
1697 }
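
/* Illustrative sketch (a hand-worked example with made-up costs): for
   the dependence chain

     A --2--> B --1--> C	(edge labels are dep_cost)

   with insn_sched_cost (C) == 1, the recursion above yields
   priority (C) = 1, priority (B) = 1 + 1 = 2 and
   priority (A) = 2 + 2 = 4, i.e. the length of the longest path from
   each insn to the end of the block.  */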
1698 
1699 /* Macros and functions for keeping the priority queue sorted, and
1700    dealing with queuing and dequeuing of instructions.  */
1701 
1702 /* For each pressure class CL, set DEATH[CL] to the number of registers
1703    in that class that die in INSN.  */
1704 
1705 static void
1706 calculate_reg_deaths (rtx_insn *insn, int *death)
1707 {
1708   int i;
1709   struct reg_use_data *use;
1710 
1711   for (i = 0; i < ira_pressure_classes_num; i++)
1712     death[ira_pressure_classes[i]] = 0;
1713   for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1714     if (dying_use_p (use))
1715       mark_regno_birth_or_death (0, death, use->regno, true);
1716 }
1717 
1718 /* Setup info about the current register pressure impact of scheduling
1719    INSN at the current scheduling point.  */
1720 static void
1721 setup_insn_reg_pressure_info (rtx_insn *insn)
1722 {
1723   int i, change, before, after, hard_regno;
1724   int excess_cost_change;
1725   machine_mode mode;
1726   enum reg_class cl;
1727   struct reg_pressure_data *pressure_info;
1728   int *max_reg_pressure;
1729   static int death[N_REG_CLASSES];
1730 
1731   gcc_checking_assert (!DEBUG_INSN_P (insn));
1732 
1733   excess_cost_change = 0;
1734   calculate_reg_deaths (insn, death);
1735   pressure_info = INSN_REG_PRESSURE (insn);
1736   max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
1737   gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
1738   for (i = 0; i < ira_pressure_classes_num; i++)
1739     {
1740       cl = ira_pressure_classes[i];
1741       gcc_assert (curr_reg_pressure[cl] >= 0);
1742       change = (int) pressure_info[i].set_increase - death[cl];
1743       before = MAX (0, max_reg_pressure[i] - sched_class_regs_num[cl]);
1744       after = MAX (0, max_reg_pressure[i] + change
1745 		   - sched_class_regs_num[cl]);
1746       hard_regno = ira_class_hard_regs[cl][0];
1747       gcc_assert (hard_regno >= 0);
1748       mode = reg_raw_mode[hard_regno];
1749       excess_cost_change += ((after - before)
1750 			     * (ira_memory_move_cost[mode][cl][0]
1751 				+ ira_memory_move_cost[mode][cl][1]));
1752     }
1753   INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
1754 }
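
/* Worked example for the loop above (all numbers assumed): for a
   pressure class CL with sched_class_regs_num[CL] == 4, a maximum
   pressure of 5 at INSN, one register set and two deaths, we get
   change == 1 - 2 == -1, before == MAX (0, 5 - 4) == 1 and
   after == MAX (0, 5 - 1 - 4) == 0, so scheduling INSN lowers the
   excess cost by one load/store pair's ira_memory_move_cost.  */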
1755 
1756 /* This is the first page of code related to SCHED_PRESSURE_MODEL.
1757    It tries to make the scheduler take register pressure into account
1758    without introducing too many unnecessary stalls.  It hooks into the
1759    main scheduling algorithm at several points:
1760 
1761     - Before scheduling starts, model_start_schedule constructs a
1762       "model schedule" for the current block.  This model schedule is
1763       chosen solely to keep register pressure down.  It does not take the
1764       target's pipeline or the original instruction order into account,
1765       except as a tie-breaker.  It also doesn't work to a particular
1766       pressure limit.
1767 
1768       This model schedule gives us an idea of what pressure can be
1769       achieved for the block and gives us an example of a schedule that
1770       keeps to that pressure.  It also makes the final schedule less
1771       dependent on the original instruction order.  This is important
1772       because the original order can either be "wide" (many values live
1773       at once, such as in user-scheduled code) or "narrow" (few values
1774       live at once, such as after loop unrolling, where several
1775       iterations are executed sequentially).
1776 
1777       We do not apply this model schedule to the rtx stream.  We simply
1778       record it in model_schedule.  We also compute the maximum pressure,
1779       MP, that was seen during this schedule.
1780 
1781     - Instructions are added to the ready queue even if they require
1782       a stall.  The length of the stall is instead computed as:
1783 
1784 	 MAX (INSN_TICK (INSN) - clock_var, 0)
1785 
1786       (= insn_delay).  This allows rank_for_schedule to choose between
1787       introducing a deliberate stall or increasing pressure.
1788 
1789     - Before sorting the ready queue, model_set_excess_costs assigns
1790       a pressure-based cost to each ready instruction in the queue.
1791       This is the instruction's INSN_REG_PRESSURE_EXCESS_COST_CHANGE
1792       (ECC for short) and is effectively measured in cycles.
1793 
1794     - rank_for_schedule ranks instructions based on:
1795 
1796 	ECC (insn) + insn_delay (insn)
1797 
1798       then as:
1799 
1800 	insn_delay (insn)
1801 
1802       So, for example, an instruction X1 with an ECC of 1 that can issue
1803       now will win over an instruction X0 with an ECC of zero that would
1804       introduce a stall of one cycle.  However, an instruction X2 with an
1805       ECC of 2 that can issue now will lose to both X0 and X1.
1806 
1807     - When an instruction is scheduled, model_recompute updates the model
1808       schedule with the new pressures (some of which might now exceed the
1809       original maximum pressure MP).  model_update_limit_points then searches
1810       for the new point of maximum pressure, if not already known.  */
1811 
1812 /* Used to separate high-verbosity debug information for SCHED_PRESSURE_MODEL
1813    from surrounding debug information.  */
1814 #define MODEL_BAR \
1815   ";;\t\t+------------------------------------------------------\n"
1816 
1817 /* Information about the pressure on a particular register class at a
1818    particular point of the model schedule.  */
1819 struct model_pressure_data {
1820   /* The pressure at this point of the model schedule, or -1 if the
1821      point is associated with an instruction that has already been
1822      scheduled.  */
1823   int ref_pressure;
1824 
1825   /* The maximum pressure during or after this point of the model schedule.  */
1826   int max_pressure;
1827 };
1828 
1829 /* Per-instruction information that is used while building the model
1830    schedule.  Here, "schedule" refers to the model schedule rather
1831    than the main schedule.  */
1832 struct model_insn_info {
1833   /* The instruction itself.  */
1834   rtx_insn *insn;
1835 
1836   /* If this instruction is in model_worklist, these fields link to the
1837      previous (higher-priority) and next (lower-priority) instructions
1838      in the list.  */
1839   struct model_insn_info *prev;
1840   struct model_insn_info *next;
1841 
1842   /* While constructing the schedule, QUEUE_INDEX describes whether an
1843      instruction has already been added to the schedule (QUEUE_SCHEDULED),
1844      is in model_worklist (QUEUE_READY), or neither (QUEUE_NOWHERE).
1845      old_queue records the value that QUEUE_INDEX had before scheduling
1846      started, so that we can restore it once the schedule is complete.  */
1847   int old_queue;
1848 
1849   /* The relative importance of an unscheduled instruction.  Higher
1850      values indicate greater importance.  */
1851   unsigned int model_priority;
1852 
1853   /* The length of the longest path of satisfied true dependencies
1854      that leads to this instruction.  */
1855   unsigned int depth;
1856 
1857   /* The length of the longest path of dependencies of any kind
1858      that leads from this instruction.  */
1859   unsigned int alap;
1860 
1861   /* The number of predecessor nodes that must still be scheduled.  */
1862   int unscheduled_preds;
1863 };
1864 
1865 /* Information about the pressure limit for a particular register class.
1866    This structure is used when applying a model schedule to the main
1867    schedule.  */
1868 struct model_pressure_limit {
1869   /* The maximum register pressure seen in the original model schedule.  */
1870   int orig_pressure;
1871 
1872   /* The maximum register pressure seen in the current model schedule
1873      (which excludes instructions that have already been scheduled).  */
1874   int pressure;
1875 
1876   /* The point of the current model schedule at which PRESSURE is first
1877      reached.  It is set to -1 if the value needs to be recomputed.  */
1878   int point;
1879 };
1880 
1881 /* Describes a particular way of measuring register pressure.  */
1882 struct model_pressure_group {
1883   /* Index PCI describes the maximum pressure on ira_pressure_classes[PCI].  */
1884   struct model_pressure_limit limits[N_REG_CLASSES];
1885 
1886   /* Index (POINT * ira_pressure_classes_num + PCI) describes the pressure
1887      on register class ira_pressure_classes[PCI] at point POINT of the
1888      current model schedule.  A POINT of model_num_insns describes the
1889      pressure at the end of the schedule.  */
1890   struct model_pressure_data *model;
1891 };
1892 
1893 /* Index POINT gives the instruction at point POINT of the model schedule.
1894    This array doesn't change during main scheduling.  */
1895 static vec<rtx_insn *> model_schedule;
1896 
1897 /* The list of instructions in the model worklist, sorted in order of
1898    decreasing priority.  */
1899 static struct model_insn_info *model_worklist;
1900 
1901 /* Index I describes the instruction with INSN_LUID I.  */
1902 static struct model_insn_info *model_insns;
1903 
1904 /* The number of instructions in the model schedule.  */
1905 static int model_num_insns;
1906 
1907 /* The index of the first instruction in model_schedule that hasn't yet been
1908    added to the main schedule, or model_num_insns if all of them have.  */
1909 static int model_curr_point;
1910 
1911 /* Describes the pressure before each instruction in the model schedule.  */
1912 static struct model_pressure_group model_before_pressure;
1913 
1914 /* The first unused model_priority value (as used in model_insn_info).  */
1915 static unsigned int model_next_priority;
1916 
1917 
1918 /* The model_pressure_data for ira_pressure_classes[PCI] in GROUP
1919    at point POINT of the model schedule.  */
1920 #define MODEL_PRESSURE_DATA(GROUP, POINT, PCI) \
1921   (&(GROUP)->model[(POINT) * ira_pressure_classes_num + (PCI)])
1922 
1923 /* The maximum pressure on ira_pressure_classes[PCI] in GROUP at or
1924    after point POINT of the model schedule.  */
1925 #define MODEL_MAX_PRESSURE(GROUP, POINT, PCI) \
1926   (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->max_pressure)
1927 
1928 /* The pressure on ira_pressure_classes[PCI] in GROUP at point POINT
1929    of the model schedule.  */
1930 #define MODEL_REF_PRESSURE(GROUP, POINT, PCI) \
1931   (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->ref_pressure)
1932 
1933 /* Information about INSN that is used when creating the model schedule.  */
1934 #define MODEL_INSN_INFO(INSN) \
1935   (&model_insns[INSN_LUID (INSN)])
1936 
1937 /* The instruction at point POINT of the model schedule.  */
1938 #define MODEL_INSN(POINT) \
1939   (model_schedule[POINT])
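
/* A minimal usage sketch (assumes the model data has been set up):
   dump the reference pressure of every pressure class at every point,
   including the end-of-schedule point model_num_insns:

     for (int point = 0; point <= model_num_insns; point++)
       for (int pci = 0; pci < ira_pressure_classes_num; pci++)
	 fprintf (sched_dump, " %d",
		  MODEL_REF_PRESSURE (&model_before_pressure,
				      point, pci));  */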
1940 
1941 
1942 /* Return INSN's index in the model schedule, or model_num_insns if it
1943    doesn't belong to that schedule.  */
1944 
1945 static int
1946 model_index (rtx_insn *insn)
1947 {
1948   if (INSN_MODEL_INDEX (insn) == 0)
1949     return model_num_insns;
1950   return INSN_MODEL_INDEX (insn) - 1;
1951 }
1952 
1953 /* Make sure that GROUP->limits is up-to-date for the current point
1954    of the model schedule.  */
1955 
1956 static void
1957 model_update_limit_points_in_group (struct model_pressure_group *group)
1958 {
1959   int pci, max_pressure, point;
1960 
1961   for (pci = 0; pci < ira_pressure_classes_num; pci++)
1962     {
1963       /* We may have passed the final point at which the pressure in
1964 	 group->limits[pci].pressure was reached.  Update the limit if so.  */
1965       max_pressure = MODEL_MAX_PRESSURE (group, model_curr_point, pci);
1966       group->limits[pci].pressure = max_pressure;
1967 
1968       /* Find the point at which MAX_PRESSURE is first reached.  We need
1969 	 to search in three cases:
1970 
1971 	 - We've already moved past the previous pressure point.
1972 	   In this case we search forward from model_curr_point.
1973 
1974 	 - We scheduled the previous point of maximum pressure ahead of
1975 	   its position in the model schedule, but doing so didn't bring
1976 	   the pressure point earlier.  In this case we search forward
1977 	   from that previous pressure point.
1978 
1979 	 - Scheduling an instruction early caused the maximum pressure
1980 	   to decrease.  In this case we will have set the pressure
1981 	   point to -1, and we search forward from model_curr_point.  */
1982       point = MAX (group->limits[pci].point, model_curr_point);
1983       while (point < model_num_insns
1984 	     && MODEL_REF_PRESSURE (group, point, pci) < max_pressure)
1985 	point++;
1986       group->limits[pci].point = point;
1987 
1988       gcc_assert (MODEL_REF_PRESSURE (group, point, pci) == max_pressure);
1989       gcc_assert (MODEL_MAX_PRESSURE (group, point, pci) == max_pressure);
1990     }
1991 }
1992 
1993 /* Make sure that all register-pressure limits are up-to-date for the
1994    current position in the model schedule.  */
1995 
1996 static void
1997 model_update_limit_points (void)
1998 {
1999   model_update_limit_points_in_group (&model_before_pressure);
2000 }
2001 
2002 /* Return the model_index of the last unscheduled use in chain USE
2003    outside of USE's instruction.  Return -1 if there are no other uses,
2004    or model_num_insns if the register is live at the end of the block.  */
2005 
2006 static int
2007 model_last_use_except (struct reg_use_data *use)
2008 {
2009   struct reg_use_data *next;
2010   int last, index;
2011 
2012   last = -1;
2013   for (next = use->next_regno_use; next != use; next = next->next_regno_use)
2014     if (NONDEBUG_INSN_P (next->insn)
2015 	&& QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
2016       {
2017 	index = model_index (next->insn);
2018 	if (index == model_num_insns)
2019 	  return model_num_insns;
2020 	if (last < index)
2021 	  last = index;
2022       }
2023   return last;
2024 }
2025 
2026 /* An instruction with model_index POINT has just been scheduled, and it
2027    adds DELTA to the pressure on ira_pressure_classes[PCI] after POINT - 1.
2028    Update MODEL_REF_PRESSURE (GROUP, POINT, PCI) and
2029    MODEL_MAX_PRESSURE (GROUP, POINT, PCI) accordingly.  */
2030 
2031 static void
2032 model_start_update_pressure (struct model_pressure_group *group,
2033 			     int point, int pci, int delta)
2034 {
2035   int next_max_pressure;
2036 
2037   if (point == model_num_insns)
2038     {
2039       /* The instruction wasn't part of the model schedule; it was moved
2040 	 from a different block.  Update the pressure for the end of
2041 	 the model schedule.  */
2042       MODEL_REF_PRESSURE (group, point, pci) += delta;
2043       MODEL_MAX_PRESSURE (group, point, pci) += delta;
2044     }
2045   else
2046     {
2047       /* Record that this instruction has been scheduled.  Nothing now
2048 	 changes between POINT and POINT + 1, so get the maximum pressure
2049 	 from the latter.  If the maximum pressure decreases, the new
2050 	 pressure point may be before POINT.  */
2051       MODEL_REF_PRESSURE (group, point, pci) = -1;
2052       next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2053       if (MODEL_MAX_PRESSURE (group, point, pci) > next_max_pressure)
2054 	{
2055 	  MODEL_MAX_PRESSURE (group, point, pci) = next_max_pressure;
2056 	  if (group->limits[pci].point == point)
2057 	    group->limits[pci].point = -1;
2058 	}
2059     }
2060 }
2061 
2062 /* Record that scheduling a later instruction has changed the pressure
2063    at point POINT of the model schedule by DELTA (which might be 0).
2064    Update GROUP accordingly.  Return nonzero if these changes might
2065    trigger changes to previous points as well.  */
2066 
2067 static int
2068 model_update_pressure (struct model_pressure_group *group,
2069 		       int point, int pci, int delta)
2070 {
2071   int ref_pressure, max_pressure, next_max_pressure;
2072 
2073   /* If POINT hasn't yet been scheduled, update its pressure.  */
2074   ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
2075   if (ref_pressure >= 0 && delta != 0)
2076     {
2077       ref_pressure += delta;
2078       MODEL_REF_PRESSURE (group, point, pci) = ref_pressure;
2079 
2080       /* Check whether the maximum pressure in the overall schedule
2081 	 has increased.  (This means that the MODEL_MAX_PRESSURE of
2082 	 every point <= POINT will need to increase too; see below.)  */
2083       if (group->limits[pci].pressure < ref_pressure)
2084 	group->limits[pci].pressure = ref_pressure;
2085 
2086       /* If we are at maximum pressure, and the maximum pressure
2087 	 point was previously unknown or later than POINT,
2088 	 bring it forward.  */
2089       if (group->limits[pci].pressure == ref_pressure
2090 	  && !IN_RANGE (group->limits[pci].point, 0, point))
2091 	group->limits[pci].point = point;
2092 
2093       /* If POINT used to be the point of maximum pressure, but isn't
2094 	 any longer, we need to recalculate it using a forward walk.  */
2095       if (group->limits[pci].pressure > ref_pressure
2096 	  && group->limits[pci].point == point)
2097 	group->limits[pci].point = -1;
2098     }
2099 
2100   /* Update the maximum pressure at POINT.  Changes here might also
2101      affect the maximum pressure at POINT - 1.  */
2102   next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2103   max_pressure = MAX (ref_pressure, next_max_pressure);
2104   if (MODEL_MAX_PRESSURE (group, point, pci) != max_pressure)
2105     {
2106       MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
2107       return 1;
2108     }
2109   return 0;
2110 }
2111 
2112 /* INSN has just been scheduled.  Update the model schedule accordingly.  */
2113 
2114 static void
2115 model_recompute (rtx_insn *insn)
2116 {
2117   struct {
2118     int last_use;
2119     int regno;
2120   } uses[FIRST_PSEUDO_REGISTER + MAX_RECOG_OPERANDS];
2121   struct reg_use_data *use;
2122   struct reg_pressure_data *reg_pressure;
2123   int delta[N_REG_CLASSES];
2124   int pci, point, mix, new_last, cl, ref_pressure, queue;
2125   unsigned int i, num_uses, num_pending_births;
2126   bool print_p;
2127 
2128   /* The destinations of INSN were previously live from POINT onwards, but are
2129      now live from model_curr_point onwards.  Set up DELTA accordingly.  */
2130   point = model_index (insn);
2131   reg_pressure = INSN_REG_PRESSURE (insn);
2132   for (pci = 0; pci < ira_pressure_classes_num; pci++)
2133     {
2134       cl = ira_pressure_classes[pci];
2135       delta[cl] = reg_pressure[pci].set_increase;
2136     }
2137 
2138   /* Record which registers previously died at POINT, but which now die
2139      before POINT.  Adjust DELTA so that it represents the effect of
2140      this change after POINT - 1.  Set NUM_PENDING_BIRTHS to the number of
2141      registers that will be born in the range [model_curr_point, POINT).  */
2142   num_uses = 0;
2143   num_pending_births = 0;
2144   bitmap_clear (tmp_bitmap);
2145   for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2146     {
2147       new_last = model_last_use_except (use);
2148       if (new_last < point && bitmap_set_bit (tmp_bitmap, use->regno))
2149 	{
2150 	  gcc_assert (num_uses < ARRAY_SIZE (uses));
2151 	  uses[num_uses].last_use = new_last;
2152 	  uses[num_uses].regno = use->regno;
2153 	  /* This register is no longer live after POINT - 1.  */
2154 	  mark_regno_birth_or_death (NULL, delta, use->regno, false);
2155 	  num_uses++;
2156 	  if (new_last >= 0)
2157 	    num_pending_births++;
2158 	}
2159     }
2160 
2161   /* Update the MODEL_REF_PRESSURE and MODEL_MAX_PRESSURE for POINT.
2162      Also set each group pressure limit for POINT.  */
2163   for (pci = 0; pci < ira_pressure_classes_num; pci++)
2164     {
2165       cl = ira_pressure_classes[pci];
2166       model_start_update_pressure (&model_before_pressure,
2167 				   point, pci, delta[cl]);
2168     }
2169 
2170   /* Walk the model schedule backwards, starting immediately before POINT.  */
2171   print_p = false;
2172   if (point != model_curr_point)
2173     do
2174       {
2175 	point--;
2176 	insn = MODEL_INSN (point);
2177 	queue = QUEUE_INDEX (insn);
2178 
2179 	if (queue != QUEUE_SCHEDULED)
2180 	  {
2181 	    /* DELTA describes the effect of the move on the register pressure
2182 	       after POINT.  Make it describe the effect on the pressure
2183 	       before POINT.  */
2184 	    i = 0;
2185 	    while (i < num_uses)
2186 	      {
2187 		if (uses[i].last_use == point)
2188 		  {
2189 		    /* This register is now live again.  */
2190 		    mark_regno_birth_or_death (NULL, delta,
2191 					       uses[i].regno, true);
2192 
2193 		    /* Remove this use from the array.  */
2194 		    uses[i] = uses[num_uses - 1];
2195 		    num_uses--;
2196 		    num_pending_births--;
2197 		  }
2198 		else
2199 		  i++;
2200 	      }
2201 
2202 	    if (sched_verbose >= 5)
2203 	      {
2204 		if (!print_p)
2205 		  {
2206 		    fprintf (sched_dump, MODEL_BAR);
2207 		    fprintf (sched_dump, ";;\t\t| New pressure for model"
2208 			     " schedule\n");
2209 		    fprintf (sched_dump, MODEL_BAR);
2210 		    print_p = true;
2211 		  }
2212 
2213 		fprintf (sched_dump, ";;\t\t| %3d %4d %-30s ",
2214 			 point, INSN_UID (insn),
2215 			 str_pattern_slim (PATTERN (insn)));
2216 		for (pci = 0; pci < ira_pressure_classes_num; pci++)
2217 		  {
2218 		    cl = ira_pressure_classes[pci];
2219 		    ref_pressure = MODEL_REF_PRESSURE (&model_before_pressure,
2220 						       point, pci);
2221 		    fprintf (sched_dump, " %s:[%d->%d]",
2222 			     reg_class_names[ira_pressure_classes[pci]],
2223 			     ref_pressure, ref_pressure + delta[cl]);
2224 		  }
2225 		fprintf (sched_dump, "\n");
2226 	      }
2227 	  }
2228 
2229 	/* Adjust the pressure at POINT.  Set MIX to nonzero if POINT - 1
2230 	   might have changed as well.  */
2231 	mix = num_pending_births;
2232 	for (pci = 0; pci < ira_pressure_classes_num; pci++)
2233 	  {
2234 	    cl = ira_pressure_classes[pci];
2235 	    mix |= delta[cl];
2236 	    mix |= model_update_pressure (&model_before_pressure,
2237 					  point, pci, delta[cl]);
2238 	  }
2239       }
2240     while (mix && point > model_curr_point);
2241 
2242   if (print_p)
2243     fprintf (sched_dump, MODEL_BAR);
2244 }
2245 
2246 /* After DEP, which was cancelled, has been resolved for insn NEXT,
2247    check whether the insn's pattern needs restoring.  */
2248 static bool
2249 must_restore_pattern_p (rtx_insn *next, dep_t dep)
2250 {
2251   if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
2252     return false;
2253 
2254   if (DEP_TYPE (dep) == REG_DEP_CONTROL)
2255     {
2256       gcc_assert (ORIG_PAT (next) != NULL_RTX);
2257       gcc_assert (next == DEP_CON (dep));
2258     }
2259   else
2260     {
2261       struct dep_replacement *desc = DEP_REPLACE (dep);
2262       if (desc->insn != next)
2263 	{
2264 	  gcc_assert (*desc->loc == desc->orig);
2265 	  return false;
2266 	}
2267     }
2268   return true;
2269 }
2270 
2271 /* model_spill_cost (CL, P, P') returns the cost of increasing the
2272    pressure on CL from P to P'.  We use this to calculate a "base ECC",
2273    baseECC (CL, X), for each pressure class CL and each instruction X.
2274    Supposing X changes the pressure on CL from P to P', and that the
2275    maximum pressure on CL in the current model schedule is MP', then:
2276 
2277    * if X occurs before or at the next point of maximum pressure in
2278      the model schedule and P' > MP', then:
2279 
2280        baseECC (CL, X) = model_spill_cost (CL, MP, P')
2281 
2282      The idea is that the pressure after scheduling a fixed set of
2283      instructions -- in this case, the set up to and including the
2284      next maximum pressure point -- is going to be the same regardless
2285      of the order; we simply want to keep the intermediate pressure
2286      under control.  Thus X has a cost of zero unless scheduling it
2287      now would exceed MP'.
2288 
2289      If all increases in the set are by the same amount, no zero-cost
2290      instruction will ever cause the pressure to exceed MP'.  However,
2291      if X is instead moved past an instruction X' with pressure in the
2292      range (MP' - (P' - P), MP'), the pressure at X' will increase
2293      beyond MP'.  Since baseECC is very much a heuristic anyway,
2294      it doesn't seem worth the overhead of tracking cases like these.
2295 
2296      The cost of exceeding MP' is always based on the original maximum
2297      pressure MP.  This is so that going 2 registers over the original
2298      limit has the same cost regardless of whether it comes from two
2299      separate +1 deltas or from a single +2 delta.
2300 
2301    * if X occurs after the next point of maximum pressure in the model
2302      schedule and P' > P, then:
2303 
2304        baseECC (CL, X) = model_spill_cost (CL, MP, MP' + (P' - P))
2305 
2306      That is, if we move X forward across a point of maximum pressure,
2307      and if X increases the pressure by P' - P, then we conservatively
2308      assume that scheduling X next would increase the maximum pressure
2309      by P' - P.  Again, the cost of doing this is based on the original
2310      maximum pressure MP, for the same reason as above.
2311 
2312    * if P' < P, P > MP, and X occurs at or after the next point of
2313      maximum pressure, then:
2314 
2315        baseECC (CL, X) = -model_spill_cost (CL, MAX (MP, P'), P)
2316 
2317      That is, if we have already exceeded the original maximum pressure MP,
2318      and if X might reduce the maximum pressure again -- or at least push
2319      it further back, and thus allow more scheduling freedom -- it is given
2320      a negative cost to reflect the improvement.
2321 
2322    * otherwise,
2323 
2324        baseECC (CL, X) = 0
2325 
2326      In this case, X is not expected to affect the maximum pressure MP',
2327      so it has zero cost.
2328 
2329    We then create a combined value baseECC (X) that is the sum of
2330    baseECC (CL, X) for each pressure class CL.
2331 
2332    baseECC (X) could itself be used as the ECC value described above.
2333    However, this is often too conservative, in the sense that it
2334    tends to make high-priority instructions that increase pressure
2335    wait too long in cases where introducing a spill would be better.
2336    For this reason the final ECC is a priority-adjusted form of
2337    baseECC (X).  Specifically, we calculate:
2338 
2339      P (X) = INSN_PRIORITY (X) - insn_delay (X) - baseECC (X)
2340      baseP = MAX { P (X) | baseECC (X) <= 0 }
2341 
2342    Then:
2343 
2344      ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0)
2345 
2346    Thus an instruction's effect on pressure is ignored if it has a high
2347    enough priority relative to the ones that don't increase pressure.
2348    Negative values of baseECC (X) do not increase the priority of X
2349    itself, but they do make it harder for other instructions to
2350    increase the pressure further.
2351 
2352    This pressure cost is deliberately timid.  The intention has been
2353    to choose a heuristic that rarely interferes with the normal list
2354    scheduler in cases where that scheduler would produce good code.
2355    We simply want to curb some of its worst excesses.  */
2356 
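/* Worked example of the ECC adjustment above (all numbers assumed):
   suppose the "cheap" ready insns give baseP = 10, and X has
   INSN_PRIORITY (X) = 14, insn_delay (X) = 0 and baseECC (X) = 6.
   Then P (X) = 14 - 0 - 6 = 8 and
   ECC (X) = MAX (MIN (10 - 8, 6), 0) = 2, so X's high priority buys
   back most of its pressure cost.  */
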
2357 /* Return the cost of increasing the pressure in class CL from FROM to TO.
2358 
2359    Here we use the very simplistic cost model that every register above
2360    sched_class_regs_num[CL] has a spill cost of 1.  We could use other
2361    measures instead, such as one based on MEMORY_MOVE_COST.  However:
2362 
2363       (1) In order for an instruction to be scheduled, the higher cost
2364 	  would need to be justified in a single saving of that many stalls.
2365 	  This is overly pessimistic, because the benefit of spilling is
2366 	  often to avoid a sequence of several short stalls rather than
2367 	  a single long one.
2368 
2369       (2) The cost is still arbitrary.  Because we are not allocating
2370 	  registers during scheduling, we have no way of knowing for
2371 	  sure how many memory accesses will be required by each spill,
2372 	  where the spills will be placed within the block, or even
2373 	  which block(s) will contain the spills.
2374 
2375    So a higher cost than 1 is often too conservative in practice,
2376    forcing blocks to contain unnecessary stalls instead of spill code.
2377    The simple cost below seems to be the best compromise.  It reduces
2378    the interference with the normal list scheduler, which helps make
2379    it more suitable for a default-on option.  */
2380 
2381 static int
2382 model_spill_cost (int cl, int from, int to)
2383 {
2384   from = MAX (from, sched_class_regs_num[cl]);
2385   return MAX (to, from) - from;
2386 }
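
/* Worked example (numbers assumed): with sched_class_regs_num[CL] == 14,
   model_spill_cost (CL, 12, 17) == 3, since only the three registers
   above the 14 allocatable ones are charged, while
   model_spill_cost (CL, 16, 17) == 1 charges just the one extra
   register beyond the excess that was already present.  */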
2387 
2388 /* Return baseECC (ira_pressure_classes[PCI], POINT), given that
2389    P = curr_reg_pressure[ira_pressure_classes[PCI]] and that
2390    P' = P + DELTA.  */
2391 
2392 static int
2393 model_excess_group_cost (struct model_pressure_group *group,
2394 			 int point, int pci, int delta)
2395 {
2396   int pressure, cl;
2397 
2398   cl = ira_pressure_classes[pci];
2399   if (delta < 0 && point >= group->limits[pci].point)
2400     {
2401       pressure = MAX (group->limits[pci].orig_pressure,
2402 		      curr_reg_pressure[cl] + delta);
2403       return -model_spill_cost (cl, pressure, curr_reg_pressure[cl]);
2404     }
2405 
2406   if (delta > 0)
2407     {
2408       if (point > group->limits[pci].point)
2409 	pressure = group->limits[pci].pressure + delta;
2410       else
2411 	pressure = curr_reg_pressure[cl] + delta;
2412 
2413       if (pressure > group->limits[pci].pressure)
2414 	return model_spill_cost (cl, group->limits[pci].orig_pressure,
2415 				 pressure);
2416     }
2417 
2418   return 0;
2419 }
2420 
2421 /* Return baseECC (MODEL_INSN (INSN)).  Dump the costs to sched_dump
2422    if PRINT_P.  */
2423 
2424 static int
2425 model_excess_cost (rtx_insn *insn, bool print_p)
2426 {
2427   int point, pci, cl, cost, this_cost, delta;
2428   struct reg_pressure_data *insn_reg_pressure;
2429   int insn_death[N_REG_CLASSES];
2430 
2431   calculate_reg_deaths (insn, insn_death);
2432   point = model_index (insn);
2433   insn_reg_pressure = INSN_REG_PRESSURE (insn);
2434   cost = 0;
2435 
2436   if (print_p)
2437     fprintf (sched_dump, ";;\t\t| %3d %4d | %4d %+3d |", point,
2438 	     INSN_UID (insn), INSN_PRIORITY (insn), insn_delay (insn));
2439 
2440   /* Sum up the individual costs for each register class.  */
2441   for (pci = 0; pci < ira_pressure_classes_num; pci++)
2442     {
2443       cl = ira_pressure_classes[pci];
2444       delta = insn_reg_pressure[pci].set_increase - insn_death[cl];
2445       this_cost = model_excess_group_cost (&model_before_pressure,
2446 					   point, pci, delta);
2447       cost += this_cost;
2448       if (print_p)
2449 	fprintf (sched_dump, " %s:[%d base cost %d]",
2450 		 reg_class_names[cl], delta, this_cost);
2451     }
2452 
2453   if (print_p)
2454     fprintf (sched_dump, "\n");
2455 
2456   return cost;
2457 }
2458 
2459 /* Dump the next points of maximum pressure for GROUP.  */
2460 
2461 static void
2462 model_dump_pressure_points (struct model_pressure_group *group)
2463 {
2464   int pci, cl;
2465 
2466   fprintf (sched_dump, ";;\t\t|  pressure points");
2467   for (pci = 0; pci < ira_pressure_classes_num; pci++)
2468     {
2469       cl = ira_pressure_classes[pci];
2470       fprintf (sched_dump, " %s:[%d->%d at ", reg_class_names[cl],
2471 	       curr_reg_pressure[cl], group->limits[pci].pressure);
2472       if (group->limits[pci].point < model_num_insns)
2473 	fprintf (sched_dump, "%d:%d]", group->limits[pci].point,
2474 		 INSN_UID (MODEL_INSN (group->limits[pci].point)));
2475       else
2476 	fprintf (sched_dump, "end]");
2477     }
2478   fprintf (sched_dump, "\n");
2479 }
2480 
2481 /* Set INSN_REG_PRESSURE_EXCESS_COST_CHANGE for INSNS[0...COUNT-1].  */
2482 
2483 static void
2484 model_set_excess_costs (rtx_insn **insns, int count)
2485 {
2486   int i, cost, priority_base, priority;
2487   bool print_p;
2488 
2489   /* Record the baseECC value for each instruction in the model schedule,
2490      except that negative costs are converted to zero ones now rather than
2491      later.  Do not assign a cost to debug instructions, since they must
2492      not change code-generation decisions.  Experiments suggest we also
2493      get better results by not assigning a cost to instructions from
2494      a different block.
2495 
2496      Set PRIORITY_BASE to baseP in the block comment above.  This is the
2497      maximum priority of the "cheap" instructions, which should always
2498      include the next model instruction.  */
2499   priority_base = 0;
2500   print_p = false;
2501   for (i = 0; i < count; i++)
2502     if (INSN_MODEL_INDEX (insns[i]))
2503       {
2504 	if (sched_verbose >= 6 && !print_p)
2505 	  {
2506 	    fprintf (sched_dump, MODEL_BAR);
2507 	    fprintf (sched_dump, ";;\t\t| Pressure costs for ready queue\n");
2508 	    model_dump_pressure_points (&model_before_pressure);
2509 	    fprintf (sched_dump, MODEL_BAR);
2510 	    print_p = true;
2511 	  }
2512 	cost = model_excess_cost (insns[i], print_p);
2513 	if (cost <= 0)
2514 	  {
2515 	    priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]) - cost;
2516 	    priority_base = MAX (priority_base, priority);
2517 	    cost = 0;
2518 	  }
2519 	INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = cost;
2520       }
2521   if (print_p)
2522     fprintf (sched_dump, MODEL_BAR);
2523 
2524   /* Use MAX (baseECC, 0) and baseP to calculate ECC for each
2525      instruction.  */
2526   for (i = 0; i < count; i++)
2527     {
2528       cost = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]);
2529       priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]);
2530       if (cost > 0 && priority > priority_base)
2531 	{
2532 	  cost += priority_base - priority;
2533 	  INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = MAX (cost, 0);
2534 	}
2535     }
2536 }
2537 
2538 
2539 /* Enum of rank_for_schedule heuristic decisions.  */
2540 enum rfs_decision {
2541   RFS_LIVE_RANGE_SHRINK1, RFS_LIVE_RANGE_SHRINK2,
2542   RFS_SCHED_GROUP, RFS_PRESSURE_DELAY, RFS_PRESSURE_TICK,
2543   RFS_FEEDS_BACKTRACK_INSN, RFS_PRIORITY, RFS_SPECULATION,
2544   RFS_SCHED_RANK, RFS_LAST_INSN, RFS_PRESSURE_INDEX,
2545   RFS_DEP_COUNT, RFS_TIE, RFS_FUSION, RFS_COST, RFS_N };
2546 
2547 /* Corresponding strings for print outs.  */
2548 static const char *rfs_str[RFS_N] = {
2549   "RFS_LIVE_RANGE_SHRINK1", "RFS_LIVE_RANGE_SHRINK2",
2550   "RFS_SCHED_GROUP", "RFS_PRESSURE_DELAY", "RFS_PRESSURE_TICK",
2551   "RFS_FEEDS_BACKTRACK_INSN", "RFS_PRIORITY", "RFS_SPECULATION",
2552   "RFS_SCHED_RANK", "RFS_LAST_INSN", "RFS_PRESSURE_INDEX",
2553   "RFS_DEP_COUNT", "RFS_TIE", "RFS_FUSION", "RFS_COST" };
2554 
2555 /* Statistical breakdown of rank_for_schedule decisions.  */
2556 struct rank_for_schedule_stats_t { unsigned stats[RFS_N]; };
2557 static rank_for_schedule_stats_t rank_for_schedule_stats;
2558 
2559 /* Return the result of comparing insns TMP and TMP2 and update
2560    Rank_For_Schedule statistics.  */
2561 static int
2562 rfs_result (enum rfs_decision decision, int result, rtx tmp, rtx tmp2)
2563 {
2564   ++rank_for_schedule_stats.stats[decision];
2565   if (result < 0)
2566     INSN_LAST_RFS_WIN (tmp) = decision;
2567   else if (result > 0)
2568     INSN_LAST_RFS_WIN (tmp2) = decision;
2569   else
2570     gcc_unreachable ();
2571   return result;
2572 }
2573 
2574 /* Sorting predicate to move DEBUG_INSNs to the top of the ready list,
2575    while keeping normal insns in their original order.  */
2576 
2577 static int
2578 rank_for_schedule_debug (const void *x, const void *y)
2579 {
2580   rtx_insn *tmp = *(rtx_insn * const *) y;
2581   rtx_insn *tmp2 = *(rtx_insn * const *) x;
2582 
2583   /* Schedule debug insns as early as possible.  */
2584   if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
2585     return -1;
2586   else if (!DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2587     return 1;
2588   else if (DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2589     return INSN_LUID (tmp) - INSN_LUID (tmp2);
2590   else
2591     return INSN_RFS_DEBUG_ORIG_ORDER (tmp2) - INSN_RFS_DEBUG_ORIG_ORDER (tmp);
2592 }
2593 
2594 /* Returns a positive value if x is preferred; returns a negative value if
2595    y is preferred.  Should never return 0, since that will make the sort
2596    unstable.  */
2597 
2598 static int
2599 rank_for_schedule (const void *x, const void *y)
2600 {
2601   rtx_insn *tmp = *(rtx_insn * const *) y;
2602   rtx_insn *tmp2 = *(rtx_insn * const *) x;
2603   int tmp_class, tmp2_class;
2604   int val, priority_val, info_val, diff;
2605 
2606   if (live_range_shrinkage_p)
2607     {
2608       /* Don't use SCHED_PRESSURE_MODEL -- it results in much worse
2609 	 code.  */
2610       gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
2611       if ((INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp) < 0
2612 	   || INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2) < 0)
2613 	  && (diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
2614 		      - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2))) != 0)
2615 	return rfs_result (RFS_LIVE_RANGE_SHRINK1, diff, tmp, tmp2);
2616       /* Sort by INSN_LUID (original insn order), so that we make the
2617 	 sort stable.  This minimizes instruction movement, thus
2618 	 minimizing sched's effect on debugging and cross-jumping.  */
2619       return rfs_result (RFS_LIVE_RANGE_SHRINK2,
2620 			 INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2621     }
2622 
2623   /* The insn in a schedule group should be issued first.  */
2624   if (flag_sched_group_heuristic &&
2625       SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
2626     return rfs_result (RFS_SCHED_GROUP, SCHED_GROUP_P (tmp2) ? 1 : -1,
2627 		       tmp, tmp2);
2628 
2629   /* Make sure that priority of TMP and TMP2 are initialized.  */
2630   gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
2631 
2632   if (sched_fusion)
2633     {
2634       /* The instruction that has the same fusion priority as the last
2635 	 instruction is the instruction we pick next.  Otherwise, we sort
2636 	 the ready list first by fusion priority, then by priority, and
2637 	 finally by INSN_LUID.  */
2638       int a = INSN_FUSION_PRIORITY (tmp);
2639       int b = INSN_FUSION_PRIORITY (tmp2);
2640       int last = -1;
2641 
2642       if (last_nondebug_scheduled_insn
2643 	  && !NOTE_P (last_nondebug_scheduled_insn)
2644 	  && BLOCK_FOR_INSN (tmp)
2645 	       == BLOCK_FOR_INSN (last_nondebug_scheduled_insn))
2646 	last = INSN_FUSION_PRIORITY (last_nondebug_scheduled_insn);
2647 
2648       if (a != last && b != last)
2649 	{
2650 	  if (a == b)
2651 	    {
2652 	      a = INSN_PRIORITY (tmp);
2653 	      b = INSN_PRIORITY (tmp2);
2654 	    }
2655 	  if (a != b)
2656 	    return rfs_result (RFS_FUSION, b - a, tmp, tmp2);
2657 	  else
2658 	    return rfs_result (RFS_FUSION,
2659 			       INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2660 	}
2661       else if (a == b)
2662 	{
2663 	  gcc_assert (last_nondebug_scheduled_insn
2664 		      && !NOTE_P (last_nondebug_scheduled_insn));
2665 	  last = INSN_PRIORITY (last_nondebug_scheduled_insn);
2666 
2667 	  a = abs (INSN_PRIORITY (tmp) - last);
2668 	  b = abs (INSN_PRIORITY (tmp2) - last);
2669 	  if (a != b)
2670 	    return rfs_result (RFS_FUSION, a - b, tmp, tmp2);
2671 	  else
2672 	    return rfs_result (RFS_FUSION,
2673 			       INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2674 	}
2675       else if (a == last)
2676 	return rfs_result (RFS_FUSION, -1, tmp, tmp2);
2677       else
2678 	return rfs_result (RFS_FUSION, 1, tmp, tmp2);
2679     }
2680 
2681   if (sched_pressure != SCHED_PRESSURE_NONE)
2682     {
2683       /* Prefer insn whose scheduling results in the smallest register
2684 	 pressure excess.  */
2685       if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
2686 		   + insn_delay (tmp)
2687 		   - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
2688 		   - insn_delay (tmp2))))
2689 	return rfs_result (RFS_PRESSURE_DELAY, diff, tmp, tmp2);
2690     }
2691 
2692   if (sched_pressure != SCHED_PRESSURE_NONE
2693       && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var)
2694       && INSN_TICK (tmp2) != INSN_TICK (tmp))
2695     {
2696       diff = INSN_TICK (tmp) - INSN_TICK (tmp2);
2697       return rfs_result (RFS_PRESSURE_TICK, diff, tmp, tmp2);
2698     }
2699 
2700   /* If we are doing backtracking in this schedule, prefer insns that
2701      have forward dependencies with negative cost against an insn that
2702      was already scheduled.  */
2703   if (current_sched_info->flags & DO_BACKTRACKING)
2704     {
2705       priority_val = FEEDS_BACKTRACK_INSN (tmp2) - FEEDS_BACKTRACK_INSN (tmp);
2706       if (priority_val)
2707 	return rfs_result (RFS_FEEDS_BACKTRACK_INSN, priority_val, tmp, tmp2);
2708     }
2709 
2710   /* Prefer insn with higher priority.  */
2711   priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
2712 
2713   if (flag_sched_critical_path_heuristic && priority_val)
2714     return rfs_result (RFS_PRIORITY, priority_val, tmp, tmp2);
2715 
2716   if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) >= 0)
2717     {
2718       int autopref = autopref_rank_for_schedule (tmp, tmp2);
2719       if (autopref != 0)
2720 	return autopref;
2721     }
2722 
2723   /* Prefer speculative insn with greater dependencies weakness.  */
2724   if (flag_sched_spec_insn_heuristic && spec_info)
2725     {
2726       ds_t ds1, ds2;
2727       dw_t dw1, dw2;
2728       int dw;
2729 
2730       ds1 = TODO_SPEC (tmp) & SPECULATIVE;
2731       if (ds1)
2732 	dw1 = ds_weak (ds1);
2733       else
2734 	dw1 = NO_DEP_WEAK;
2735 
2736       ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
2737       if (ds2)
2738 	dw2 = ds_weak (ds2);
2739       else
2740 	dw2 = NO_DEP_WEAK;
2741 
2742       dw = dw2 - dw1;
2743       if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
2744 	return rfs_result (RFS_SPECULATION, dw, tmp, tmp2);
2745     }
2746 
2747   info_val = (*current_sched_info->rank) (tmp, tmp2);
2748   if (flag_sched_rank_heuristic && info_val)
2749     return rfs_result (RFS_SCHED_RANK, info_val, tmp, tmp2);
2750 
2751   /* Compare insns based on their relation to the last scheduled
2752      non-debug insn.  */
2753   if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
2754     {
2755       dep_t dep1;
2756       dep_t dep2;
2757       rtx_insn *last = last_nondebug_scheduled_insn;
2758 
2759       /* Classify the instructions into three classes:
2760          1) Data dependent on the last scheduled insn.
2761          2) Anti/Output dependent on the last scheduled insn.
2762          3) Independent of the last scheduled insn, or has a latency of one.
2763          Choose the insn from the highest numbered class if different.  */
2764       dep1 = sd_find_dep_between (last, tmp, true);
2765 
2766       if (dep1 == NULL || dep_cost (dep1) == 1)
2767 	tmp_class = 3;
2768       else if (/* Data dependence.  */
2769 	       DEP_TYPE (dep1) == REG_DEP_TRUE)
2770 	tmp_class = 1;
2771       else
2772 	tmp_class = 2;
2773 
2774       dep2 = sd_find_dep_between (last, tmp2, true);
2775 
2776       if (dep2 == NULL || dep_cost (dep2) == 1)
2777 	tmp2_class = 3;
2778       else if (/* Data dependence.  */
2779 	       DEP_TYPE (dep2) == REG_DEP_TRUE)
2780 	tmp2_class = 1;
2781       else
2782 	tmp2_class = 2;
2783 
2784       if ((val = tmp2_class - tmp_class))
2785 	return rfs_result (RFS_LAST_INSN, val, tmp, tmp2);
2786     }
2787 
2788   /* Prefer instructions that occur earlier in the model schedule.  */
2789   if (sched_pressure == SCHED_PRESSURE_MODEL)
2790     {
2791       diff = model_index (tmp) - model_index (tmp2);
2792       if (diff != 0)
2793 	return rfs_result (RFS_PRESSURE_INDEX, diff, tmp, tmp2);
2794     }
2795 
2796   /* Prefer the insn which has more later insns that depend on it.
2797      This gives the scheduler more freedom when scheduling later
2798      instructions at the expense of added register pressure.  */
2799 
2800   val = (dep_list_size (tmp2, SD_LIST_FORW)
2801 	 - dep_list_size (tmp, SD_LIST_FORW));
2802 
2803   if (flag_sched_dep_count_heuristic && val != 0)
2804     return rfs_result (RFS_DEP_COUNT, val, tmp, tmp2);
2805 
2806   /* Sort by INSN_COST rather than INSN_LUID.  This means that instructions
2807      which take longer to execute are prioritised, which leads to more
2808      dual-issue opportunities on in-order cores that have this feature.  */
2809 
2810   if (INSN_COST (tmp) != INSN_COST (tmp2))
2811     return rfs_result (RFS_COST, INSN_COST (tmp2) - INSN_COST (tmp),
2812 		       tmp, tmp2);
2813 
2814   /* If insns are equally good, sort by INSN_LUID (original insn order),
2815      so that we make the sort stable.  This minimizes instruction movement,
2816      thus minimizing sched's effect on debugging and cross-jumping.  */
2817   return rfs_result (RFS_TIE, INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2818 }
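
/* A note on orientation (an observation, not new behavior): because
   rank_for_schedule reads TMP from Y and TMP2 from X, qsorting the
   ready vector with it sorts by ascending priority, leaving the most
   preferred insn at the highest index, which is where
   ready_remove_first and ready_element expect it
   (ready->vec[ready->first]).  */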
2819 
2820 /* Resort the array A, in which only the last element (index N - 1) may be out of order.  */
2821 
2822 HAIFA_INLINE static void
2823 swap_sort (rtx_insn **a, int n)
2824 {
2825   rtx_insn *insn = a[n - 1];
2826   int i = n - 2;
2827 
2828   while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
2829     {
2830       a[i + 1] = a[i];
2831       i -= 1;
2832     }
2833   a[i + 1] = insn;
2834 }
2835 
2836 /* Add INSN to the insn queue so that it can be executed at least
2837    N_CYCLES after the currently executing insn.  Preserve insns
2838    chain for debugging purposes.  REASON will be printed in debugging
2839    output.  */
2840 
2841 HAIFA_INLINE static void
2842 queue_insn (rtx_insn *insn, int n_cycles, const char *reason)
2843 {
2844   int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
2845   rtx_insn_list *link = alloc_INSN_LIST (insn, insn_queue[next_q]);
2846   int new_tick;
2847 
2848   gcc_assert (n_cycles <= max_insn_queue_index);
2849   gcc_assert (!DEBUG_INSN_P (insn));
2850 
2851   insn_queue[next_q] = link;
2852   q_size += 1;
2853 
2854   if (sched_verbose >= 2)
2855     {
2856       fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
2857 	       (*current_sched_info->print_insn) (insn, 0));
2858 
2859       fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
2860     }
2861 
2862   QUEUE_INDEX (insn) = next_q;
2863 
2864   if (current_sched_info->flags & DO_BACKTRACKING)
2865     {
2866       new_tick = clock_var + n_cycles;
2867       if (INSN_TICK (insn) == INVALID_TICK || INSN_TICK (insn) < new_tick)
2868 	INSN_TICK (insn) = new_tick;
2869 
2870       if (INSN_EXACT_TICK (insn) != INVALID_TICK
2871 	  && INSN_EXACT_TICK (insn) < clock_var + n_cycles)
2872 	{
2873 	  must_backtrack = true;
2874 	  if (sched_verbose >= 2)
2875 	    fprintf (sched_dump, ";;\t\tcausing a backtrack.\n");
2876 	}
2877     }
2878 }
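
/* A minimal sketch of the circular queue indexing used above (the slot
   count is assumed): with max_insn_queue_index == 63 (64 slots) and
   q_ptr == 60, an insn queued for n_cycles == 6 lands in slot
   NEXT_Q_AFTER (60, 6) == (60 + 6) & 63 == 2; the queue wraps around
   rather than growing.  */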
2879 
2880 /* Remove INSN from queue.  */
2881 static void
2882 queue_remove (rtx_insn *insn)
2883 {
2884   gcc_assert (QUEUE_INDEX (insn) >= 0);
2885   remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
2886   q_size--;
2887   QUEUE_INDEX (insn) = QUEUE_NOWHERE;
2888 }
2889 
2890 /* Return a pointer to the bottom of the ready list, i.e. the insn
2891    with the lowest priority.  */
2892 
2893 rtx_insn **
2894 ready_lastpos (struct ready_list *ready)
2895 {
2896   gcc_assert (ready->n_ready >= 1);
2897   return ready->vec + ready->first - ready->n_ready + 1;
2898 }
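
/* Illustrative layout sketch (positions assumed): with veclen == 8,
   first == 6 and n_ready == 3, the ready insns occupy

     vec:  [0] [1] [2] [3] [4] [5] [6] [7]
                            low       high

   so ready_lastpos returns &vec[4], and the highest-priority insn
   sits at vec[first] == vec[6].  */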
2899 
2900 /* Add an element INSN to the ready list so that it ends up with the
2901    lowest/highest priority depending on FIRST_P.  */
2902 
2903 HAIFA_INLINE static void
2904 ready_add (struct ready_list *ready, rtx_insn *insn, bool first_p)
2905 {
2906   if (!first_p)
2907     {
2908       if (ready->first == ready->n_ready)
2909 	{
2910 	  memmove (ready->vec + ready->veclen - ready->n_ready,
2911 		   ready_lastpos (ready),
2912 		   ready->n_ready * sizeof (rtx));
2913 	  ready->first = ready->veclen - 1;
2914 	}
2915       ready->vec[ready->first - ready->n_ready] = insn;
2916     }
2917   else
2918     {
2919       if (ready->first == ready->veclen - 1)
2920 	{
2921 	  if (ready->n_ready)
2922 	    /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
2923 	    memmove (ready->vec + ready->veclen - ready->n_ready - 1,
2924 		     ready_lastpos (ready),
2925 		     ready->n_ready * sizeof (rtx));
2926 	  ready->first = ready->veclen - 2;
2927 	}
2928       ready->vec[++(ready->first)] = insn;
2929     }
2930 
2931   ready->n_ready++;
2932   if (DEBUG_INSN_P (insn))
2933     ready->n_debug++;
2934 
2935   gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
2936   QUEUE_INDEX (insn) = QUEUE_READY;
2937 
2938   if (INSN_EXACT_TICK (insn) != INVALID_TICK
2939       && INSN_EXACT_TICK (insn) < clock_var)
2940     {
2941       must_backtrack = true;
2942     }
2943 }
2944 
2945 /* Remove the element with the highest priority from the ready list and
2946    return it.  */
2947 
2948 HAIFA_INLINE static rtx_insn *
2949 ready_remove_first (struct ready_list *ready)
2950 {
2951   rtx_insn *t;
2952 
2953   gcc_assert (ready->n_ready);
2954   t = ready->vec[ready->first--];
2955   ready->n_ready--;
2956   if (DEBUG_INSN_P (t))
2957     ready->n_debug--;
2958   /* If the queue becomes empty, reset it.  */
2959   if (ready->n_ready == 0)
2960     ready->first = ready->veclen - 1;
2961 
2962   gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
2963   QUEUE_INDEX (t) = QUEUE_NOWHERE;
2964 
2965   return t;
2966 }
2967 
2968 /* The following code implements multi-pass scheduling for the first
2969    cycle.  In other words, we try to choose the ready insn that
2970    permits starting the maximum number of insns on the same cycle.  */
2971 
2972 /* Return a pointer to element INDEX of the ready list.  The insn
2973    with the highest priority has INDEX 0, and the insn with the
2974    lowest priority has INDEX N_READY - 1.  */
2975 
2976 rtx_insn *
2977 ready_element (struct ready_list *ready, int index)
2978 {
2979   gcc_assert (ready->n_ready && index < ready->n_ready);
2980 
2981   return ready->vec[ready->first - index];
2982 }
2983 
2984 /* Remove element INDEX from the ready list and return it.  The insn
2985    with the highest priority has INDEX 0, and the insn with the
2986    lowest priority has INDEX N_READY - 1.  */
2987 
2988 HAIFA_INLINE static rtx_insn *
2989 ready_remove (struct ready_list *ready, int index)
2990 {
2991   rtx_insn *t;
2992   int i;
2993 
2994   if (index == 0)
2995     return ready_remove_first (ready);
2996   gcc_assert (ready->n_ready && index < ready->n_ready);
2997   t = ready->vec[ready->first - index];
2998   ready->n_ready--;
2999   if (DEBUG_INSN_P (t))
3000     ready->n_debug--;
3001   for (i = index; i < ready->n_ready; i++)
3002     ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
3003   QUEUE_INDEX (t) = QUEUE_NOWHERE;
3004   return t;
3005 }
3006 
3007 /* Remove INSN from the ready list.  */
3008 static void
3009 ready_remove_insn (rtx_insn *insn)
3010 {
3011   int i;
3012 
3013   for (i = 0; i < readyp->n_ready; i++)
3014     if (ready_element (readyp, i) == insn)
3015       {
3016         ready_remove (readyp, i);
3017         return;
3018       }
3019   gcc_unreachable ();
3020 }
3021 
3022 /* Calculate the difference of the two statistics sets WAS and NOW.
3023    The result is returned in WAS.  */
3024 static void
3025 rank_for_schedule_stats_diff (rank_for_schedule_stats_t *was,
3026 			      const rank_for_schedule_stats_t *now)
3027 {
3028   for (int i = 0; i < RFS_N; ++i)
3029     was->stats[i] = now->stats[i] - was->stats[i];
3030 }
3031 
3032 /* Print rank_for_schedule statistics.  */
3033 static void
3034 print_rank_for_schedule_stats (const char *prefix,
3035 			       const rank_for_schedule_stats_t *stats,
3036 			       struct ready_list *ready)
3037 {
3038   for (int i = 0; i < RFS_N; ++i)
3039     if (stats->stats[i])
3040       {
3041 	fprintf (sched_dump, "%s%20s: %u", prefix, rfs_str[i], stats->stats[i]);
3042 
3043 	if (ready != NULL)
3044 	  /* Print out insns that won due to RFS_<I>.  */
3045 	  {
3046 	    rtx_insn **p = ready_lastpos (ready);
3047 
3048 	    fprintf (sched_dump, ":");
3049 	    /* Start with 1 since the least-priority insn didn't have any wins.  */
3050 	    for (int j = 1; j < ready->n_ready; ++j)
3051 	      if (INSN_LAST_RFS_WIN (p[j]) == i)
3052 		fprintf (sched_dump, " %s",
3053 			 (*current_sched_info->print_insn) (p[j], 0));
3054 	  }
3055 	fprintf (sched_dump, "\n");
3056       }
3057 }
3058 
3059 /* Separate DEBUG_INSNs from normal insns.  DEBUG_INSNs go to the end
3060    of the array.  */
3061 static void
3062 ready_sort_debug (struct ready_list *ready)
3063 {
3064   int i;
3065   rtx_insn **first = ready_lastpos (ready);
3066 
3067   for (i = 0; i < ready->n_ready; ++i)
3068     if (!DEBUG_INSN_P (first[i]))
3069       INSN_RFS_DEBUG_ORIG_ORDER (first[i]) = i;
3070 
3071   qsort (first, ready->n_ready, sizeof (rtx), rank_for_schedule_debug);
3072 }
3073 
3074 /* Sort non-debug insns in the ready list READY by ascending priority.
3075    Assumes that all debug insns are separated from the real insns.  */
3076 static void
3077 ready_sort_real (struct ready_list *ready)
3078 {
3079   int i;
3080   rtx_insn **first = ready_lastpos (ready);
3081   int n_ready_real = ready->n_ready - ready->n_debug;
3082 
3083   if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
3084     for (i = 0; i < n_ready_real; ++i)
3085       setup_insn_reg_pressure_info (first[i]);
3086   else if (sched_pressure == SCHED_PRESSURE_MODEL
3087 	   && model_curr_point < model_num_insns)
3088     model_set_excess_costs (first, n_ready_real);
3089 
3090   rank_for_schedule_stats_t stats1;
3091   if (sched_verbose >= 4)
3092     stats1 = rank_for_schedule_stats;
3093 
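  /* With exactly two real insns a single compare-and-swap suffices
     (swap_sort); larger lists need a full qsort with
     rank_for_schedule.  */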
3094   if (n_ready_real == 2)
3095     swap_sort (first, n_ready_real);
3096   else if (n_ready_real > 2)
3097     qsort (first, n_ready_real, sizeof (rtx), rank_for_schedule);
3098 
3099   if (sched_verbose >= 4)
3100     {
3101       rank_for_schedule_stats_diff (&stats1, &rank_for_schedule_stats);
3102       print_rank_for_schedule_stats (";;\t\t", &stats1, ready);
3103     }
3104 }
3105 
3106 /* Sort the ready list READY by ascending priority.  */
3107 static void
3108 ready_sort (struct ready_list *ready)
3109 {
3110   if (ready->n_debug > 0)
3111     ready_sort_debug (ready);
3112   else
3113     ready_sort_real (ready);
3114 }
3115 
3116 /* PREV is an insn that is ready to execute.  Adjust its priority if that
3117    will help shorten or lengthen register lifetimes as appropriate.  Also
3118    provide a hook for the target to tweak the priority itself.  */
3119 
3120 HAIFA_INLINE static void
3121 adjust_priority (rtx_insn *prev)
3122 {
3123   /* ??? There used to be code here to try and estimate how an insn
3124      affected register lifetimes, but it did it by looking at REG_DEAD
3125      notes, which we removed in schedule_region.  Nor did it try to
3126      take into account register pressure or anything useful like that.
3127 
3128      Revisit when we have a machine model to work with and not before.  */
3129 
3130   if (targetm.sched.adjust_priority)
3131     INSN_PRIORITY (prev) =
3132       targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
3133 }
3134 
3135 /* Advance DFA state STATE on one cycle.  */
3136 void
3137 advance_state (state_t state)
3138 {
3139   if (targetm.sched.dfa_pre_advance_cycle)
3140     targetm.sched.dfa_pre_advance_cycle ();
3141 
3142   if (targetm.sched.dfa_pre_cycle_insn)
3143     state_transition (state,
3144 		      targetm.sched.dfa_pre_cycle_insn ());
3145 
3146   state_transition (state, NULL);
3147 
3148   if (targetm.sched.dfa_post_cycle_insn)
3149     state_transition (state,
3150 		      targetm.sched.dfa_post_cycle_insn ());
3151 
3152   if (targetm.sched.dfa_post_advance_cycle)
3153     targetm.sched.dfa_post_advance_cycle ();
3154 }
3155 
3156 /* Advance time on one cycle.  */
3157 HAIFA_INLINE static void
3158 advance_one_cycle (void)
3159 {
3160   advance_state (curr_state);
3161   if (sched_verbose >= 4)
3162     fprintf (sched_dump, ";;\tAdvance the current state.\n");
3163 }
3164 
3165 /* Update register pressure after scheduling INSN.  */
3166 static void
3167 update_register_pressure (rtx_insn *insn)
3168 {
3169   struct reg_use_data *use;
3170   struct reg_set_data *set;
3171 
3172   gcc_checking_assert (!DEBUG_INSN_P (insn));
3173 
3174   for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
3175     if (dying_use_p (use))
3176       mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
3177 				 use->regno, false);
3178   for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
3179     mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
3180 			       set->regno, true);
3181 }
3182 
3183 /* Set up or update (if UPDATE_P) max register pressure (see its
3184    meaning in sched-int.h::_haifa_insn_data) for all current BB insns
3185    after insn AFTER.  */
3186 static void
3187 setup_insn_max_reg_pressure (rtx_insn *after, bool update_p)
3188 {
3189   int i, p;
3190   bool eq_p;
3191   rtx_insn *insn;
3192   static int max_reg_pressure[N_REG_CLASSES];
3193 
3194   save_reg_pressure ();
3195   for (i = 0; i < ira_pressure_classes_num; i++)
3196     max_reg_pressure[ira_pressure_classes[i]]
3197       = curr_reg_pressure[ira_pressure_classes[i]];
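  /* Walk the insns of the block after AFTER, keeping a running maximum
     per pressure class; in update mode we can stop as soon as an insn's
     recorded maxima already match, since later insns would be unchanged
     as well.  */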
3198   for (insn = NEXT_INSN (after);
3199        insn != NULL_RTX && ! BARRIER_P (insn)
3200 	 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
3201        insn = NEXT_INSN (insn))
3202     if (NONDEBUG_INSN_P (insn))
3203       {
3204 	eq_p = true;
3205 	for (i = 0; i < ira_pressure_classes_num; i++)
3206 	  {
3207 	    p = max_reg_pressure[ira_pressure_classes[i]];
3208 	    if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
3209 	      {
3210 		eq_p = false;
3211 		INSN_MAX_REG_PRESSURE (insn)[i]
3212 		  = max_reg_pressure[ira_pressure_classes[i]];
3213 	      }
3214 	  }
3215 	if (update_p && eq_p)
3216 	  break;
3217 	update_register_pressure (insn);
3218 	for (i = 0; i < ira_pressure_classes_num; i++)
3219 	  if (max_reg_pressure[ira_pressure_classes[i]]
3220 	      < curr_reg_pressure[ira_pressure_classes[i]])
3221 	    max_reg_pressure[ira_pressure_classes[i]]
3222 	      = curr_reg_pressure[ira_pressure_classes[i]];
3223       }
3224   restore_reg_pressure ();
3225 }
3226 
3227 /* Update the current register pressure after scheduling INSN.  Also
3228    update the max register pressure for the unscheduled insns of the
3229    current BB.  */
3230 static void
3231 update_reg_and_insn_max_reg_pressure (rtx_insn *insn)
3232 {
3233   int i;
3234   int before[N_REG_CLASSES];
3235 
3236   for (i = 0; i < ira_pressure_classes_num; i++)
3237     before[i] = curr_reg_pressure[ira_pressure_classes[i]];
3238   update_register_pressure (insn);
3239   for (i = 0; i < ira_pressure_classes_num; i++)
3240     if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
3241       break;
3242   if (i < ira_pressure_classes_num)
3243     setup_insn_max_reg_pressure (insn, true);
3244 }
3245 
3246 /* Set up register pressure at the beginning of basic block BB, whose
3247    insns start after insn AFTER.  Also set up the max register pressure
3248    for all insns of the basic block.  */
3249 void
3250 sched_setup_bb_reg_pressure_info (basic_block bb, rtx_insn *after)
3251 {
3252   gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
3253   initiate_bb_reg_pressure_info (bb);
3254   setup_insn_max_reg_pressure (after, false);
3255 }
3256 
3257 /* If doing predication while scheduling, verify whether INSN, which
3258    has just been scheduled, clobbers the conditions of any
3259    instructions that must be predicated in order to break their
3260    dependencies.  If so, remove them from the queues so that they will
3261    only be scheduled once their control dependency is resolved.  */
3262 
3263 static void
3264 check_clobbered_conditions (rtx_insn *insn)
3265 {
3266   HARD_REG_SET t;
3267   int i;
3268 
3269   if ((current_sched_info->flags & DO_PREDICATION) == 0)
3270     return;
3271 
3272   find_all_hard_reg_sets (insn, &t, true);
3273 
3274  restart:
3275   for (i = 0; i < ready.n_ready; i++)
3276     {
3277       rtx_insn *x = ready_element (&ready, i);
3278       if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
3279 	{
3280 	  ready_remove_insn (x);
3281 	  goto restart;
3282 	}
3283     }
3284   for (i = 0; i <= max_insn_queue_index; i++)
3285     {
3286       rtx_insn_list *link;
3287       int q = NEXT_Q_AFTER (q_ptr, i);
3288 
3289     restart_queue:
3290       for (link = insn_queue[q]; link; link = link->next ())
3291 	{
3292 	  rtx_insn *x = link->insn ();
3293 	  if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
3294 	    {
3295 	      queue_remove (x);
3296 	      goto restart_queue;
3297 	    }
3298 	}
3299     }
3300 }
3301 
3302 /* Return (in order):
3303 
3304    - positive if INSN adversely affects the pressure on one
3305      register class
3306 
3307    - negative if INSN reduces the pressure on one register class
3308 
3309    - 0 if INSN doesn't affect the pressure on any register class.  */
3310 
3311 static int
3312 model_classify_pressure (struct model_insn_info *insn)
3313 {
3314   struct reg_pressure_data *reg_pressure;
3315   int death[N_REG_CLASSES];
3316   int pci, cl, sum;
3317 
3318   calculate_reg_deaths (insn->insn, death);
3319   reg_pressure = INSN_REG_PRESSURE (insn->insn);
3320   sum = 0;
3321   for (pci = 0; pci < ira_pressure_classes_num; pci++)
3322     {
3323       cl = ira_pressure_classes[pci];
3324       if (death[cl] < reg_pressure[pci].set_increase)
3325 	return 1;
3326       sum += reg_pressure[pci].set_increase - death[cl];
3327     }
3328   return sum;
3329 }
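
/* An illustrative reading of model_classify_pressure (hypothetical
   numbers): if INSN sets one new GENERAL_REGS value (set_increase == 1)
   and is the last use of two GENERAL_REGS registers (death == 2), the
   function returns the sum -1, i.e. scheduling INSN reduces pressure.
   If instead no register died, death < set_increase would hold and the
   function would return 1.  */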
3330 
3331 /* Return true if INSN1 should come before INSN2 in the model schedule.  */
3332 
3333 static int
3334 model_order_p (struct model_insn_info *insn1, struct model_insn_info *insn2)
3335 {
3336   unsigned int height1, height2;
3337   unsigned int priority1, priority2;
3338 
3339   /* Prefer instructions with a higher model priority.  */
3340   if (insn1->model_priority != insn2->model_priority)
3341     return insn1->model_priority > insn2->model_priority;
3342 
3343   /* Combine the length of the longest path of satisfied true dependencies
3344      that leads to each instruction (depth) with the length of the longest
3345      path of any dependencies that leads from the instruction (alap).
3346      Prefer instructions with the greatest combined length.  If the combined
3347      lengths are equal, prefer instructions with the greatest depth.
3348 
3349      The idea is that, if we have a set S of "equal" instructions that each
3350      have ALAP value X, and we pick one such instruction I, any true-dependent
3351      successors of I that have ALAP value X - 1 should be preferred over S.
3352      This encourages the schedule to be "narrow" rather than "wide".
3353      However, if I is a low-priority instruction that we decided to
3354      schedule because of its model_classify_pressure, and if there
3355      is a set of higher-priority instructions T, the aforementioned
3356      successors of I should not have the edge over T.  */
3357   height1 = insn1->depth + insn1->alap;
3358   height2 = insn2->depth + insn2->alap;
3359   if (height1 != height2)
3360     return height1 > height2;
3361   if (insn1->depth != insn2->depth)
3362     return insn1->depth > insn2->depth;
3363 
3364   /* We have no real preference between INSN1 and INSN2 as far as attempts
3365      to reduce pressure go.  Prefer instructions with higher priorities.  */
3366   priority1 = INSN_PRIORITY (insn1->insn);
3367   priority2 = INSN_PRIORITY (insn2->insn);
3368   if (priority1 != priority2)
3369     return priority1 > priority2;
3370 
3371   /* Use the original rtl sequence as a tie-breaker.  */
3372   return insn1 < insn2;
3373 }
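
/* A worked illustration of the ordering above (hypothetical insns with
   equal model priorities): depth 3 + alap 4 gives height 7, which beats
   depth 5 + alap 1 (height 6); if both heights were 7, the insn with
   depth 5 would win over the one with depth 3.  */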
3374 
3375 /* Add INSN to the model worklist immediately after PREV.  Add it to the
3376    beginning of the list if PREV is null.  */
3377 
3378 static void
3379 model_add_to_worklist_at (struct model_insn_info *insn,
3380 			  struct model_insn_info *prev)
3381 {
3382   gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_NOWHERE);
3383   QUEUE_INDEX (insn->insn) = QUEUE_READY;
3384 
3385   insn->prev = prev;
3386   if (prev)
3387     {
3388       insn->next = prev->next;
3389       prev->next = insn;
3390     }
3391   else
3392     {
3393       insn->next = model_worklist;
3394       model_worklist = insn;
3395     }
3396   if (insn->next)
3397     insn->next->prev = insn;
3398 }
3399 
3400 /* Remove INSN from the model worklist.  */
3401 
3402 static void
3403 model_remove_from_worklist (struct model_insn_info *insn)
3404 {
3405   gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_READY);
3406   QUEUE_INDEX (insn->insn) = QUEUE_NOWHERE;
3407 
3408   if (insn->prev)
3409     insn->prev->next = insn->next;
3410   else
3411     model_worklist = insn->next;
3412   if (insn->next)
3413     insn->next->prev = insn->prev;
3414 }
3415 
3416 /* Add INSN to the model worklist.  Start looking for a suitable position
3417    between neighbors PREV and NEXT, testing at most MAX_SCHED_READY_INSNS
3418    insns either side.  A null PREV indicates the beginning of the list and
3419    a null NEXT indicates the end.  */
3420 
3421 static void
3422 model_add_to_worklist (struct model_insn_info *insn,
3423 		       struct model_insn_info *prev,
3424 		       struct model_insn_info *next)
3425 {
3426   int count;
3427 
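  /* Examine at most MAX_SCHED_READY_INSNS candidate positions: walk
     backwards from PREV while INSN outranks the entries, or else
     forwards from NEXT while the entries outrank INSN; PREV ends up
     naming the insertion point.  */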
3428   count = MAX_SCHED_READY_INSNS;
3429   if (count > 0 && prev && model_order_p (insn, prev))
3430     do
3431       {
3432 	count--;
3433 	prev = prev->prev;
3434       }
3435     while (count > 0 && prev && model_order_p (insn, prev));
3436   else
3437     while (count > 0 && next && model_order_p (next, insn))
3438       {
3439 	count--;
3440 	prev = next;
3441 	next = next->next;
3442       }
3443   model_add_to_worklist_at (insn, prev);
3444 }
3445 
3446 /* INSN may now have a higher priority (in the model_order_p sense)
3447    than before.  Move it up the worklist if necessary.  */
3448 
3449 static void
3450 model_promote_insn (struct model_insn_info *insn)
3451 {
3452   struct model_insn_info *prev;
3453   int count;
3454 
3455   prev = insn->prev;
3456   count = MAX_SCHED_READY_INSNS;
3457   while (count > 0 && prev && model_order_p (insn, prev))
3458     {
3459       count--;
3460       prev = prev->prev;
3461     }
3462   if (prev != insn->prev)
3463     {
3464       model_remove_from_worklist (insn);
3465       model_add_to_worklist_at (insn, prev);
3466     }
3467 }
3468 
3469 /* Add INSN to the end of the model schedule.  */
3470 
3471 static void
3472 model_add_to_schedule (rtx_insn *insn)
3473 {
3474   unsigned int point;
3475 
3476   gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
3477   QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
3478 
3479   point = model_schedule.length ();
3480   model_schedule.quick_push (insn);
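  /* The stored index is biased by 1, which is assumed to leave 0 free
     as a "not in the model schedule" marker for model_index to test.  */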
3481   INSN_MODEL_INDEX (insn) = point + 1;
3482 }
3483 
3484 /* Analyze the instructions that are to be scheduled, setting up
3485    MODEL_INSN_INFO (...) and model_num_insns accordingly.  Add ready
3486    instructions to model_worklist.  */
3487 
3488 static void
3489 model_analyze_insns (void)
3490 {
3491   rtx_insn *start, *end, *iter;
3492   sd_iterator_def sd_it;
3493   dep_t dep;
3494   struct model_insn_info *insn, *con;
3495 
3496   model_num_insns = 0;
3497   start = PREV_INSN (current_sched_info->next_tail);
3498   end = current_sched_info->prev_head;
3499   for (iter = start; iter != end; iter = PREV_INSN (iter))
3500     if (NONDEBUG_INSN_P (iter))
3501       {
3502 	insn = MODEL_INSN_INFO (iter);
3503 	insn->insn = iter;
3504 	FOR_EACH_DEP (iter, SD_LIST_FORW, sd_it, dep)
3505 	  {
3506 	    con = MODEL_INSN_INFO (DEP_CON (dep));
3507 	    if (con->insn && insn->alap < con->alap + 1)
3508 	      insn->alap = con->alap + 1;
3509 	  }
3510 
3511 	insn->old_queue = QUEUE_INDEX (iter);
3512 	QUEUE_INDEX (iter) = QUEUE_NOWHERE;
3513 
3514 	insn->unscheduled_preds = dep_list_size (iter, SD_LIST_HARD_BACK);
3515 	if (insn->unscheduled_preds == 0)
3516 	  model_add_to_worklist (insn, NULL, model_worklist);
3517 
3518 	model_num_insns++;
3519       }
3520 }
3521 
3522 /* The global state describes the register pressure at the start of the
3523    model schedule.  Initialize GROUP accordingly.  */
3524 
3525 static void
3526 model_init_pressure_group (struct model_pressure_group *group)
3527 {
3528   int pci, cl;
3529 
3530   for (pci = 0; pci < ira_pressure_classes_num; pci++)
3531     {
3532       cl = ira_pressure_classes[pci];
3533       group->limits[pci].pressure = curr_reg_pressure[cl];
3534       group->limits[pci].point = 0;
3535     }
3536   /* Use index model_num_insns to record the state after the last
3537      instruction in the model schedule.  */
3538   group->model = XNEWVEC (struct model_pressure_data,
3539 			  (model_num_insns + 1) * ira_pressure_classes_num);
3540 }
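
/* (The group->model array is treated as a flat (point, pressure-class)
   matrix; the MODEL_REF_PRESSURE and MODEL_MAX_PRESSURE accessors are
   assumed to hide the point * ira_pressure_classes_num + pci
   indexing.)  */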
3541 
3542 /* Record that MODEL_REF_PRESSURE (GROUP, POINT, PCI) is PRESSURE.
3543    Update the maximum pressure for the whole schedule.  */
3544 
3545 static void
3546 model_record_pressure (struct model_pressure_group *group,
3547 		       int point, int pci, int pressure)
3548 {
3549   MODEL_REF_PRESSURE (group, point, pci) = pressure;
3550   if (group->limits[pci].pressure < pressure)
3551     {
3552       group->limits[pci].pressure = pressure;
3553       group->limits[pci].point = point;
3554     }
3555 }
3556 
3557 /* INSN has just been added to the end of the model schedule.  Record its
3558    register-pressure information.  */
3559 
3560 static void
3561 model_record_pressures (struct model_insn_info *insn)
3562 {
3563   struct reg_pressure_data *reg_pressure;
3564   int point, pci, cl, delta;
3565   int death[N_REG_CLASSES];
3566 
3567   point = model_index (insn->insn);
3568   if (sched_verbose >= 2)
3569     {
3570       if (point == 0)
3571 	{
3572 	  fprintf (sched_dump, "\n;;\tModel schedule:\n;;\n");
3573 	  fprintf (sched_dump, ";;\t| idx insn | mpri hght dpth prio |\n");
3574 	}
3575       fprintf (sched_dump, ";;\t| %3d %4d | %4d %4d %4d %4d | %-30s ",
3576 	       point, INSN_UID (insn->insn), insn->model_priority,
3577 	       insn->depth + insn->alap, insn->depth,
3578 	       INSN_PRIORITY (insn->insn),
3579 	       str_pattern_slim (PATTERN (insn->insn)));
3580     }
3581   calculate_reg_deaths (insn->insn, death);
3582   reg_pressure = INSN_REG_PRESSURE (insn->insn);
3583   for (pci = 0; pci < ira_pressure_classes_num; pci++)
3584     {
3585       cl = ira_pressure_classes[pci];
3586       delta = reg_pressure[pci].set_increase - death[cl];
3587       if (sched_verbose >= 2)
3588 	fprintf (sched_dump, " %s:[%d,%+d]", reg_class_names[cl],
3589 		 curr_reg_pressure[cl], delta);
3590       model_record_pressure (&model_before_pressure, point, pci,
3591 			     curr_reg_pressure[cl]);
3592     }
3593   if (sched_verbose >= 2)
3594     fprintf (sched_dump, "\n");
3595 }
3596 
3597 /* All instructions have been added to the model schedule.  Record the
3598    final register pressure in GROUP and set up all MODEL_MAX_PRESSUREs.  */
3599 
3600 static void
3601 model_record_final_pressures (struct model_pressure_group *group)
3602 {
3603   int point, pci, max_pressure, ref_pressure, cl;
3604 
3605   for (pci = 0; pci < ira_pressure_classes_num; pci++)
3606     {
3607       /* Record the final pressure for this class.  */
3608       cl = ira_pressure_classes[pci];
3609       point = model_num_insns;
3610       ref_pressure = curr_reg_pressure[cl];
3611       model_record_pressure (group, point, pci, ref_pressure);
3612 
3613       /* Record the original maximum pressure.  */
3614       group->limits[pci].orig_pressure = group->limits[pci].pressure;
3615 
3616       /* Update the MODEL_MAX_PRESSURE for every point of the schedule.  */
3617       max_pressure = ref_pressure;
3618       MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
3619       while (point > 0)
3620 	{
3621 	  point--;
3622 	  ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
3623 	  max_pressure = MAX (max_pressure, ref_pressure);
3624 	  MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
3625 	}
3626     }
3627 }
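
/* For example (illustrative pressures only): if the reference pressures
   of one class at points 0..3 are 2, 5, 3, 4, the backward scan above
   records MODEL_MAX_PRESSURE values 5, 5, 4, 4 -- the maximum over the
   rest of the model schedule from each point onwards.  */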
3628 
3629 /* Update all successors of INSN, given that INSN has just been scheduled.  */
3630 
3631 static void
3632 model_add_successors_to_worklist (struct model_insn_info *insn)
3633 {
3634   sd_iterator_def sd_it;
3635   struct model_insn_info *con;
3636   dep_t dep;
3637 
3638   FOR_EACH_DEP (insn->insn, SD_LIST_FORW, sd_it, dep)
3639     {
3640       con = MODEL_INSN_INFO (DEP_CON (dep));
3641       /* Ignore debug instructions, and instructions from other blocks.  */
3642       if (con->insn)
3643 	{
3644 	  con->unscheduled_preds--;
3645 
3646 	  /* Update the depth field of each true-dependent successor.
3647 	     Increasing the depth gives them a higher priority than
3648 	     before.  */
3649 	  if (DEP_TYPE (dep) == REG_DEP_TRUE && con->depth < insn->depth + 1)
3650 	    {
3651 	      con->depth = insn->depth + 1;
3652 	      if (QUEUE_INDEX (con->insn) == QUEUE_READY)
3653 		model_promote_insn (con);
3654 	    }
3655 
3656 	  /* If this is a true dependency, or if there are no remaining
3657 	     dependencies for CON (meaning that CON only had non-true
3658 	     dependencies), make sure that CON is on the worklist.
3659 	     We don't bother otherwise because it would tend to fill the
3660 	     worklist with a lot of low-priority instructions that are not
3661 	     yet ready to issue.  */
3662 	  if ((con->depth > 0 || con->unscheduled_preds == 0)
3663 	      && QUEUE_INDEX (con->insn) == QUEUE_NOWHERE)
3664 	    model_add_to_worklist (con, insn, insn->next);
3665 	}
3666     }
3667 }
3668 
3669 /* Give INSN a higher priority than any current instruction, then give
3670    unscheduled predecessors of INSN a higher priority still.  If any of
3671    those predecessors are not on the model worklist, do the same for their
3672    predecessors, and so on.  */
3673 
3674 static void
3675 model_promote_predecessors (struct model_insn_info *insn)
3676 {
3677   struct model_insn_info *pro, *first;
3678   sd_iterator_def sd_it;
3679   dep_t dep;
3680 
3681   if (sched_verbose >= 7)
3682     fprintf (sched_dump, ";;\t+--- priority of %d = %d, priority of",
3683 	     INSN_UID (insn->insn), model_next_priority);
3684   insn->model_priority = model_next_priority++;
3685   model_remove_from_worklist (insn);
3686   model_add_to_worklist_at (insn, NULL);
3687 
3688   first = NULL;
3689   for (;;)
3690     {
3691       FOR_EACH_DEP (insn->insn, SD_LIST_HARD_BACK, sd_it, dep)
3692 	{
3693 	  pro = MODEL_INSN_INFO (DEP_PRO (dep));
3694 	  /* The first test is to ignore debug instructions, and instructions
3695 	     from other blocks.  */
3696 	  if (pro->insn
3697 	      && pro->model_priority != model_next_priority
3698 	      && QUEUE_INDEX (pro->insn) != QUEUE_SCHEDULED)
3699 	    {
3700 	      pro->model_priority = model_next_priority;
3701 	      if (sched_verbose >= 7)
3702 		fprintf (sched_dump, " %d", INSN_UID (pro->insn));
3703 	      if (QUEUE_INDEX (pro->insn) == QUEUE_READY)
3704 		{
3705 		  /* PRO is already in the worklist, but it now has
3706 		     a higher priority than before.  Move it to the
3707 		     appropriate place.  */
3708 		  model_remove_from_worklist (pro);
3709 		  model_add_to_worklist (pro, NULL, model_worklist);
3710 		}
3711 	      else
3712 		{
3713 		  /* PRO isn't in the worklist.  Recursively process
3714 		     its predecessors until we find one that is.  */
3715 		  pro->next = first;
3716 		  first = pro;
3717 		}
3718 	    }
3719 	}
3720       if (!first)
3721 	break;
3722       insn = first;
3723       first = insn->next;
3724     }
3725   if (sched_verbose >= 7)
3726     fprintf (sched_dump, " = %d\n", model_next_priority);
3727   model_next_priority++;
3728 }
3729 
3730 /* Pick one instruction from model_worklist and process it.  */
3731 
3732 static void
3733 model_choose_insn (void)
3734 {
3735   struct model_insn_info *insn, *fallback;
3736   int count;
3737 
3738   if (sched_verbose >= 7)
3739     {
3740       fprintf (sched_dump, ";;\t+--- worklist:\n");
3741       insn = model_worklist;
3742       count = MAX_SCHED_READY_INSNS;
3743       while (count > 0 && insn)
3744 	{
3745 	  fprintf (sched_dump, ";;\t+---   %d [%d, %d, %d, %d]\n",
3746 		   INSN_UID (insn->insn), insn->model_priority,
3747 		   insn->depth + insn->alap, insn->depth,
3748 		   INSN_PRIORITY (insn->insn));
3749 	  count--;
3750 	  insn = insn->next;
3751 	}
3752     }
3753 
3754   /* Look for a ready instruction whose model_classify_pressure is zero
3755      or negative, picking the highest-priority one.  Adding such an
3756      instruction to the schedule now should do no harm, and may actually
3757      do some good.
3758 
3759      Failing that, see whether there is an instruction with the highest
3760      extant model_priority that is not yet ready, but which would reduce
3761      pressure if it became ready.  This is designed to catch cases like:
3762 
3763        (set (mem (reg R1)) (reg R2))
3764 
3765      where the instruction is the last remaining use of R1 and where the
3766      value of R2 is not yet available (or vice versa).  The death of R1
3767      means that this instruction already reduces pressure.  It is of
3768      course possible that the computation of R2 involves other registers
3769      that are hard to kill, but such cases are rare enough for this
3770      heuristic to be a win in general.
3771 
3772      Failing that, just pick the highest-priority instruction in the
3773      worklist.  */
3774   count = MAX_SCHED_READY_INSNS;
3775   insn = model_worklist;
3776   fallback = 0;
3777   for (;;)
3778     {
3779       if (count == 0 || !insn)
3780 	{
3781 	  insn = fallback ? fallback : model_worklist;
3782 	  break;
3783 	}
3784       if (insn->unscheduled_preds)
3785 	{
3786 	  if (model_worklist->model_priority == insn->model_priority
3787 	      && !fallback
3788 	      && model_classify_pressure (insn) < 0)
3789 	    fallback = insn;
3790 	}
3791       else
3792 	{
3793 	  if (model_classify_pressure (insn) <= 0)
3794 	    break;
3795 	}
3796       count--;
3797       insn = insn->next;
3798     }
3799 
3800   if (sched_verbose >= 7 && insn != model_worklist)
3801     {
3802       if (insn->unscheduled_preds)
3803 	fprintf (sched_dump, ";;\t+--- promoting insn %d, with dependencies\n",
3804 		 INSN_UID (insn->insn));
3805       else
3806 	fprintf (sched_dump, ";;\t+--- promoting insn %d, which is ready\n",
3807 		 INSN_UID (insn->insn));
3808     }
3809   if (insn->unscheduled_preds)
3810     /* INSN isn't yet ready to issue.  Give all its predecessors the
3811        highest priority.  */
3812     model_promote_predecessors (insn);
3813   else
3814     {
3815       /* INSN is ready.  Add it to the end of model_schedule and
3816 	 process its successors.  */
3817       model_add_successors_to_worklist (insn);
3818       model_remove_from_worklist (insn);
3819       model_add_to_schedule (insn->insn);
3820       model_record_pressures (insn);
3821       update_register_pressure (insn->insn);
3822     }
3823 }
3824 
3825 /* Restore all QUEUE_INDEXs to the values that they had before
3826    model_start_schedule was called.  */
3827 
3828 static void
3829 model_reset_queue_indices (void)
3830 {
3831   unsigned int i;
3832   rtx_insn *insn;
3833 
3834   FOR_EACH_VEC_ELT (model_schedule, i, insn)
3835     QUEUE_INDEX (insn) = MODEL_INSN_INFO (insn)->old_queue;
3836 }
3837 
3838 /* We have calculated the model schedule and spill costs.  Print a summary
3839    to sched_dump.  */
3840 
3841 static void
3842 model_dump_pressure_summary (void)
3843 {
3844   int pci, cl;
3845 
3846   fprintf (sched_dump, ";; Pressure summary:");
3847   for (pci = 0; pci < ira_pressure_classes_num; pci++)
3848     {
3849       cl = ira_pressure_classes[pci];
3850       fprintf (sched_dump, " %s:%d", reg_class_names[cl],
3851 	       model_before_pressure.limits[pci].pressure);
3852     }
3853   fprintf (sched_dump, "\n\n");
3854 }
3855 
3856 /* Initialize the SCHED_PRESSURE_MODEL information for the current
3857    scheduling region.  */
3858 
3859 static void
3860 model_start_schedule (basic_block bb)
3861 {
3862   model_next_priority = 1;
3863   model_schedule.create (sched_max_luid);
3864   model_insns = XCNEWVEC (struct model_insn_info, sched_max_luid);
3865 
3866   gcc_assert (bb == BLOCK_FOR_INSN (NEXT_INSN (current_sched_info->prev_head)));
3867   initiate_reg_pressure_info (df_get_live_in (bb));
3868 
3869   model_analyze_insns ();
3870   model_init_pressure_group (&model_before_pressure);
3871   while (model_worklist)
3872     model_choose_insn ();
3873   gcc_assert (model_num_insns == (int) model_schedule.length ());
3874   if (sched_verbose >= 2)
3875     fprintf (sched_dump, "\n");
3876 
3877   model_record_final_pressures (&model_before_pressure);
3878   model_reset_queue_indices ();
3879 
3880   XDELETEVEC (model_insns);
3881 
3882   model_curr_point = 0;
3883   initiate_reg_pressure_info (df_get_live_in (bb));
3884   if (sched_verbose >= 1)
3885     model_dump_pressure_summary ();
3886 }
3887 
3888 /* Free the information associated with GROUP.  */
3889 
3890 static void
3891 model_finalize_pressure_group (struct model_pressure_group *group)
3892 {
3893   XDELETEVEC (group->model);
3894 }
3895 
3896 /* Free the information created by model_start_schedule.  */
3897 
3898 static void
3899 model_end_schedule (void)
3900 {
3901   model_finalize_pressure_group (&model_before_pressure);
3902   model_schedule.release ();
3903 }
3904 
3905 /* Prepare reg pressure scheduling for basic block BB.  */
3906 static void
3907 sched_pressure_start_bb (basic_block bb)
3908 {
3909   /* Set the number of available registers for each class taking into account
3910      relative probability of current basic block versus function prologue and
3911      epilogue.
3912      * If the basic block executes much more often than the prologue/epilogue
3913      (e.g., inside a hot loop), then cost of spill in the prologue is close to
3914      nil, so the effective number of available registers is
3915      (ira_class_hard_regs_num[cl] - fixed_regs_num[cl] - 0).
3916      * If the basic block executes as often as the prologue/epilogue,
3917      then spill in the block is as costly as in the prologue, so the effective
3918      number of available registers is
3919      (ira_class_hard_regs_num[cl] - fixed_regs_num[cl]
3920       - call_saved_regs_num[cl]).
3921      Note that all-else-equal, we prefer to spill in the prologue, since that
3922      allows "extra" registers for other basic blocks of the function.
3923      * If the basic block is on the cold path of the function and executes
3924      rarely, then we should always prefer to spill in the block, rather than
3925      in the prologue/epilogue.  The effective number of available registers is
3926      (ira_class_hard_regs_num[cl] - fixed_regs_num[cl]
3927       - call_saved_regs_num[cl]).  */
3928   {
3929     int i;
3930     int entry_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.to_frequency (cfun);
3931     int bb_freq = bb->count.to_frequency (cfun);
3932 
3933     if (bb_freq == 0)
3934       {
3935 	if (entry_freq == 0)
3936 	  entry_freq = bb_freq = 1;
3937       }
3938     if (bb_freq < entry_freq)
3939       bb_freq = entry_freq;
3940 
3941     for (i = 0; i < ira_pressure_classes_num; ++i)
3942       {
3943 	enum reg_class cl = ira_pressure_classes[i];
3944 	sched_class_regs_num[cl] = ira_class_hard_regs_num[cl]
3945 				   - fixed_regs_num[cl];
3946 	sched_class_regs_num[cl]
3947 	  -= (call_saved_regs_num[cl] * entry_freq) / bb_freq;
3948       }
3949   }
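
  /* An illustrative calculation (hypothetical numbers): for a class with
     12 hard registers, 2 of them fixed and 8 call-saved, a block four
     times as frequent as the entry block gets 12 - 2 - (8 * 1) / 4 = 8
     effective registers, while a block as frequent as the entry gets
     12 - 2 - 8 = 2.  */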
3950 
3951   if (sched_pressure == SCHED_PRESSURE_MODEL)
3952     model_start_schedule (bb);
3953 }
3954 
3955 /* A structure that holds local state for the loop in schedule_block.  */
3956 struct sched_block_state
3957 {
3958   /* True if no real insns have been scheduled in the current cycle.  */
3959   bool first_cycle_insn_p;
3960   /* True if a shadow insn has been scheduled in the current cycle, which
3961      means that no more normal insns can be issued.  */
3962   bool shadows_only_p;
3963   /* True if we're winding down a modulo schedule, which means that we only
3964      issue insns with INSN_EXACT_TICK set.  */
3965   bool modulo_epilogue;
3966   /* Initialized with the machine's issue rate every cycle, and updated
3967      by calls to the variable_issue hook.  */
3968   int can_issue_more;
3969 };
3970 
3971 /* INSN is the "currently executing insn".  Launch each insn which was
3972    waiting on INSN.  The global ready list contains the insns that are
3973    ready to fire, and clock_var is the current cycle.  The function
3974    returns the necessary cycle advance after issuing the insn (it is
3975    not zero for insns in a schedule group).  */
3976 
3977 static int
3978 schedule_insn (rtx_insn *insn)
3979 {
3980   sd_iterator_def sd_it;
3981   dep_t dep;
3982   int i;
3983   int advance = 0;
3984 
3985   if (sched_verbose >= 1)
3986     {
3987       struct reg_pressure_data *pressure_info;
3988       fprintf (sched_dump, ";;\t%3i--> %s %-40s:",
3989 	       clock_var, (*current_sched_info->print_insn) (insn, 1),
3990 	       str_pattern_slim (PATTERN (insn)));
3991 
3992       if (recog_memoized (insn) < 0)
3993 	fprintf (sched_dump, "nothing");
3994       else
3995 	print_reservation (sched_dump, insn);
3996       pressure_info = INSN_REG_PRESSURE (insn);
3997       if (pressure_info != NULL)
3998 	{
3999 	  fputc (':', sched_dump);
4000 	  for (i = 0; i < ira_pressure_classes_num; i++)
4001 	    fprintf (sched_dump, "%s%s%+d(%d)",
4002 		     scheduled_insns.length () > 1
4003 		     && INSN_LUID (insn)
4004 		     < INSN_LUID (scheduled_insns[scheduled_insns.length () - 2]) ? "@" : "",
4005 		     reg_class_names[ira_pressure_classes[i]],
4006 		     pressure_info[i].set_increase, pressure_info[i].change);
4007 	}
4008       if (sched_pressure == SCHED_PRESSURE_MODEL
4009 	  && model_curr_point < model_num_insns
4010 	  && model_index (insn) == model_curr_point)
4011 	fprintf (sched_dump, ":model %d", model_curr_point);
4012       fputc ('\n', sched_dump);
4013     }
4014 
4015   if (sched_pressure == SCHED_PRESSURE_WEIGHTED && !DEBUG_INSN_P (insn))
4016     update_reg_and_insn_max_reg_pressure (insn);
4017 
4018   /* The insn being scheduled should have all its hard dependencies
4019      resolved and should have been removed from the ready list.  */
4020   gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
4021 
4022   /* Reset debug insns invalidated by moving this insn.  */
4023   if (MAY_HAVE_DEBUG_BIND_INSNS && !DEBUG_INSN_P (insn))
4024     for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
4025 	 sd_iterator_cond (&sd_it, &dep);)
4026       {
4027 	rtx_insn *dbg = DEP_PRO (dep);
4028 	struct reg_use_data *use, *next;
4029 
4030 	if (DEP_STATUS (dep) & DEP_CANCELLED)
4031 	  {
4032 	    sd_iterator_next (&sd_it);
4033 	    continue;
4034 	  }
4035 
4036 	gcc_assert (DEBUG_BIND_INSN_P (dbg));
4037 
4038 	if (sched_verbose >= 6)
4039 	  fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
4040 		   INSN_UID (dbg));
4041 
4042 	/* ??? Rather than resetting the debug insn, we might be able
4043 	   to emit a debug temp before the just-scheduled insn, but
4044 	   this would involve checking that the expression at the
4045 	   point of the debug insn is equivalent to the expression
4046 	   before the just-scheduled insn.  They might not be: the
4047 	   expression in the debug insn may depend on other insns not
4048 	   yet scheduled that set MEMs, REGs or even other debug
4049 	   insns.  It's not clear that attempting to preserve debug
4050 	   information in these cases is worth the effort, given how
4051 	   uncommon these resets are and the likelihood that the debug
4052 	   temps introduced won't survive the schedule change.  */
4053 	INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
4054 	df_insn_rescan (dbg);
4055 
4056 	/* Unknown location doesn't use any registers.  */
4057 	for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
4058 	  {
4059 	    struct reg_use_data *prev = use;
4060 
4061 	    /* Remove use from the cyclic next_regno_use chain first.  */
4062 	    while (prev->next_regno_use != use)
4063 	      prev = prev->next_regno_use;
4064 	    prev->next_regno_use = use->next_regno_use;
4065 	    next = use->next_insn_use;
4066 	    free (use);
4067 	  }
4068 	INSN_REG_USE_LIST (dbg) = NULL;
4069 
4070 	/* We delete rather than resolve these deps, otherwise we
4071 	   crash in sched_free_deps(), because forward deps are
4072 	   expected to be released before backward deps.  */
4073 	sd_delete_dep (sd_it);
4074       }
4075 
4076   gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
4077   QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
4078 
4079   if (sched_pressure == SCHED_PRESSURE_MODEL
4080       && model_curr_point < model_num_insns
4081       && NONDEBUG_INSN_P (insn))
4082     {
4083       if (model_index (insn) == model_curr_point)
4084 	do
4085 	  model_curr_point++;
4086 	while (model_curr_point < model_num_insns
4087 	       && (QUEUE_INDEX (MODEL_INSN (model_curr_point))
4088 		   == QUEUE_SCHEDULED));
4089       else
4090 	model_recompute (insn);
4091       model_update_limit_points ();
4092       update_register_pressure (insn);
4093       if (sched_verbose >= 2)
4094 	print_curr_reg_pressure ();
4095     }
4096 
4097   gcc_assert (INSN_TICK (insn) >= MIN_TICK);
4098   if (INSN_TICK (insn) > clock_var)
4099     /* INSN has been prematurely moved from the queue to the ready list.
4100        This is possible only if one of the following flags is set.  */
4101     gcc_assert (flag_sched_stalled_insns || sched_fusion);
4102 
4103   /* ??? Probably, if INSN is scheduled prematurely, we should leave
4104      INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
4105   INSN_TICK (insn) = clock_var;
4106 
4107   check_clobbered_conditions (insn);
4108 
4109   /* Update dependent instructions.  First, see if by scheduling this insn
4110      now we broke a dependence in a way that requires us to change another
4111      insn.  */
4112   for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4113        sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
4114     {
4115       struct dep_replacement *desc = DEP_REPLACE (dep);
4116       rtx_insn *pro = DEP_PRO (dep);
4117       if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED
4118 	  && desc != NULL && desc->insn == pro)
4119 	apply_replacement (dep, false);
4120     }
4121 
4122   /* Go through and resolve forward dependencies.  */
4123   for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4124        sd_iterator_cond (&sd_it, &dep);)
4125     {
4126       rtx_insn *next = DEP_CON (dep);
4127       bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
4128 
4129       /* Resolve the dependence between INSN and NEXT.
4130 	 sd_resolve_dep () moves current dep to another list thus
4131 	 advancing the iterator.  */
4132       sd_resolve_dep (sd_it);
4133 
4134       if (cancelled)
4135 	{
4136 	  if (must_restore_pattern_p (next, dep))
4137 	    restore_pattern (dep, false);
4138 	  continue;
4139 	}
4140 
4141       /* Don't bother trying to mark next as ready if insn is a debug
4142 	 insn.  If insn is the last hard dependency, it will have
4143 	 already been discounted.  */
4144       if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
4145 	continue;
4146 
4147       if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4148 	{
4149 	  int effective_cost;
4150 
4151 	  effective_cost = try_ready (next);
4152 
4153 	  if (effective_cost >= 0
4154 	      && SCHED_GROUP_P (next)
4155 	      && advance < effective_cost)
4156 	    advance = effective_cost;
4157 	}
4158       else
4159 	/* A speculation check always has only one forward dependence (to the
4160 	   first insn in the recovery block), so this will be executed only once.  */
4161 	{
4162 	  gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
4163 	  fix_recovery_deps (RECOVERY_BLOCK (insn));
4164 	}
4165     }
4166 
4167   /* Annotate the instruction with issue information -- TImode
4168      indicates that the instruction is expected not to be able
4169      to issue on the same cycle as the previous insn.  A machine
4170      may use this information to decide how the instruction should
4171      be aligned.  */
4172   if (issue_rate > 1
4173       && GET_CODE (PATTERN (insn)) != USE
4174       && GET_CODE (PATTERN (insn)) != CLOBBER
4175       && !DEBUG_INSN_P (insn))
4176     {
4177       if (reload_completed)
4178 	PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
4179       last_clock_var = clock_var;
4180     }
4181 
4182   if (nonscheduled_insns_begin != NULL_RTX)
4183     /* Indicate to debug counters that INSN is scheduled.  */
4184     nonscheduled_insns_begin = insn;
4185 
4186   return advance;
4187 }
4188 
4189 /* Functions for handling of notes.  */
4190 
4191 /* Add note list that ends on FROM_END to the end of TO_ENDP.  */
4192 void
4193 concat_note_lists (rtx_insn *from_end, rtx_insn **to_endp)
4194 {
4195   rtx_insn *from_start;
4196 
4197   /* It's easy when we have nothing to concat.  */
4198   if (from_end == NULL)
4199     return;
4200 
4201   /* It's also easy when destination is empty.  */
4202   if (*to_endp == NULL)
4203     {
4204       *to_endp = from_end;
4205       return;
4206     }
4207 
4208   from_start = from_end;
4209   while (PREV_INSN (from_start) != NULL)
4210     from_start = PREV_INSN (from_start);
4211 
4212   SET_PREV_INSN (from_start) = *to_endp;
4213   SET_NEXT_INSN (*to_endp) = from_start;
4214   *to_endp = from_end;
4215 }
4216 
4217 /* Delete notes between HEAD and TAIL and put them in the chain
4218    of notes ended by NOTE_LIST.  */
4219 void
4220 remove_notes (rtx_insn *head, rtx_insn *tail)
4221 {
4222   rtx_insn *next_tail, *insn, *next;
4223 
4224   note_list = 0;
4225   if (head == tail && !INSN_P (head))
4226     return;
4227 
4228   next_tail = NEXT_INSN (tail);
4229   for (insn = head; insn != next_tail; insn = next)
4230     {
4231       next = NEXT_INSN (insn);
4232       if (!NOTE_P (insn))
4233 	continue;
4234 
4235       switch (NOTE_KIND (insn))
4236 	{
4237 	case NOTE_INSN_BASIC_BLOCK:
4238 	  continue;
4239 
4240 	case NOTE_INSN_EPILOGUE_BEG:
4241 	  if (insn != tail)
4242 	    {
4243 	      remove_insn (insn);
4244 	      add_reg_note (next, REG_SAVE_NOTE,
4245 			    GEN_INT (NOTE_INSN_EPILOGUE_BEG));
4246 	      break;
4247 	    }
4248 	  /* FALLTHRU */
4249 
4250 	default:
4251 	  remove_insn (insn);
4252 
4253 	  /* Add the note to list that ends at NOTE_LIST.  */
4254 	  SET_PREV_INSN (insn) = note_list;
4255 	  SET_NEXT_INSN (insn) = NULL_RTX;
4256 	  if (note_list)
4257 	    SET_NEXT_INSN (note_list) = insn;
4258 	  note_list = insn;
4259 	  break;
4260 	}
4261 
4262       gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
4263     }
4264 }
4265 
4266 /* A structure to record enough data to allow us to backtrack the scheduler to
4267    a previous state.  */
4268 struct haifa_saved_data
4269 {
4270   /* Next entry on the list.  */
4271   struct haifa_saved_data *next;
4272 
4273   /* Backtracking is associated with scheduling insns that have delay slots.
4274      DELAY_PAIR points to the structure that contains the insns involved, and
4275      the number of cycles between them.  */
4276   struct delay_pair *delay_pair;
4277 
4278   /* Data used by the frontend (e.g. sched-ebb or sched-rgn).  */
4279   void *fe_saved_data;
4280   /* Data used by the backend.  */
4281   void *be_saved_data;
4282 
4283   /* Copies of global state.  */
4284   int clock_var, last_clock_var;
4285   struct ready_list ready;
4286   state_t curr_state;
4287 
4288   rtx_insn *last_scheduled_insn;
4289   rtx_insn *last_nondebug_scheduled_insn;
4290   rtx_insn *nonscheduled_insns_begin;
4291   int cycle_issued_insns;
4292 
4293   /* Copies of state used in the inner loop of schedule_block.  */
4294   struct sched_block_state sched_block;
4295 
4296   /* We don't need to save q_ptr, as its value is arbitrary and we can set it
4297      to 0 when restoring.  */
4298   int q_size;
4299   rtx_insn_list **insn_queue;
4300 
4301   /* Describe pattern replacements that occurred since this backtrack point
4302      was queued.  */
4303   vec<dep_t> replacement_deps;
4304   vec<int> replace_apply;
4305 
4306   /* A copy of the next-cycle replacement vectors at the time of the backtrack
4307      point.  */
4308   vec<dep_t> next_cycle_deps;
4309   vec<int> next_cycle_apply;
4310 };
4311 
4312 /* A record, in reverse order, of all scheduled insns which have delay slots
4313    and may require backtracking.  */
4314 static struct haifa_saved_data *backtrack_queue;
4315 
4316 /* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
4317    to SET_P.  */
4318 static void
4319 mark_backtrack_feeds (rtx_insn *insn, int set_p)
4320 {
4321   sd_iterator_def sd_it;
4322   dep_t dep;
4323   FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
4324     {
4325       FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
4326     }
4327 }
4328 
4329 /* Save the current scheduler state so that we can backtrack to it
4330    later if necessary.  PAIR gives the insns that make it necessary to
4331    save this point.  SCHED_BLOCK is the local state of schedule_block
4332    that needs to be saved.  */
4333 static void
4334 save_backtrack_point (struct delay_pair *pair,
4335 		      struct sched_block_state sched_block)
4336 {
4337   int i;
4338   struct haifa_saved_data *save = XNEW (struct haifa_saved_data);
4339 
4340   save->curr_state = xmalloc (dfa_state_size);
4341   memcpy (save->curr_state, curr_state, dfa_state_size);
4342 
4343   save->ready.first = ready.first;
4344   save->ready.n_ready = ready.n_ready;
4345   save->ready.n_debug = ready.n_debug;
4346   save->ready.veclen = ready.veclen;
4347   save->ready.vec = XNEWVEC (rtx_insn *, ready.veclen);
4348   memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));
4349 
4350   save->insn_queue = XNEWVEC (rtx_insn_list *, max_insn_queue_index + 1);
4351   save->q_size = q_size;
4352   for (i = 0; i <= max_insn_queue_index; i++)
4353     {
4354       int q = NEXT_Q_AFTER (q_ptr, i);
4355       save->insn_queue[i] = copy_INSN_LIST (insn_queue[q]);
4356     }
4357 
4358   save->clock_var = clock_var;
4359   save->last_clock_var = last_clock_var;
4360   save->cycle_issued_insns = cycle_issued_insns;
4361   save->last_scheduled_insn = last_scheduled_insn;
4362   save->last_nondebug_scheduled_insn = last_nondebug_scheduled_insn;
4363   save->nonscheduled_insns_begin = nonscheduled_insns_begin;
4364 
4365   save->sched_block = sched_block;
4366 
4367   save->replacement_deps.create (0);
4368   save->replace_apply.create (0);
4369   save->next_cycle_deps = next_cycle_replace_deps.copy ();
4370   save->next_cycle_apply = next_cycle_apply.copy ();
4371 
4372   if (current_sched_info->save_state)
4373     save->fe_saved_data = (*current_sched_info->save_state) ();
4374 
4375   if (targetm.sched.alloc_sched_context)
4376     {
4377       save->be_saved_data = targetm.sched.alloc_sched_context ();
4378       targetm.sched.init_sched_context (save->be_saved_data, false);
4379     }
4380   else
4381     save->be_saved_data = NULL;
4382 
4383   save->delay_pair = pair;
4384 
4385   save->next = backtrack_queue;
4386   backtrack_queue = save;
4387 
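  /* Pin each I2 in PAIR's chain to its exact issue cycle and flag the
     insns feeding it, so that a later conflict can trigger backtracking
     to this point.  */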
4388   while (pair)
4389     {
4390       mark_backtrack_feeds (pair->i2, 1);
4391       INSN_TICK (pair->i2) = INVALID_TICK;
4392       INSN_EXACT_TICK (pair->i2) = clock_var + pair_delay (pair);
4393       SHADOW_P (pair->i2) = pair->stages == 0;
4394       pair = pair->next_same_i1;
4395     }
4396 }
4397 
4398 /* Walk the ready list and all queues. If any insns have unresolved backwards
4399    dependencies, these must be cancelled deps, broken by predication.  Set or
4400    clear (depending on SET) the DEP_CANCELLED bit in DEP_STATUS.  */
4401 
4402 static void
4403 toggle_cancelled_flags (bool set)
4404 {
4405   int i;
4406   sd_iterator_def sd_it;
4407   dep_t dep;
4408 
4409   if (ready.n_ready > 0)
4410     {
4411       rtx_insn **first = ready_lastpos (&ready);
4412       for (i = 0; i < ready.n_ready; i++)
4413 	FOR_EACH_DEP (first[i], SD_LIST_BACK, sd_it, dep)
4414 	  if (!DEBUG_INSN_P (DEP_PRO (dep)))
4415 	    {
4416 	      if (set)
4417 		DEP_STATUS (dep) |= DEP_CANCELLED;
4418 	      else
4419 		DEP_STATUS (dep) &= ~DEP_CANCELLED;
4420 	    }
4421     }
4422   for (i = 0; i <= max_insn_queue_index; i++)
4423     {
4424       int q = NEXT_Q_AFTER (q_ptr, i);
4425       rtx_insn_list *link;
4426       for (link = insn_queue[q]; link; link = link->next ())
4427 	{
4428 	  rtx_insn *insn = link->insn ();
4429 	  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4430 	    if (!DEBUG_INSN_P (DEP_PRO (dep)))
4431 	      {
4432 		if (set)
4433 		  DEP_STATUS (dep) |= DEP_CANCELLED;
4434 		else
4435 		  DEP_STATUS (dep) &= ~DEP_CANCELLED;
4436 	      }
4437 	}
4438     }
4439 }
4440 
4441 /* Undo the replacements that have occurred after backtrack point SAVE
4442    was placed.  */
4443 static void
4444 undo_replacements_for_backtrack (struct haifa_saved_data *save)
4445 {
4446   while (!save->replacement_deps.is_empty ())
4447     {
4448       dep_t dep = save->replacement_deps.pop ();
4449       int apply_p = save->replace_apply.pop ();
4450 
4451       if (apply_p)
4452 	restore_pattern (dep, true);
4453       else
4454 	apply_replacement (dep, true);
4455     }
4456   save->replacement_deps.release ();
4457   save->replace_apply.release ();
4458 }
4459 
4460 /* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
4461    Restore their dependencies to an unresolved state, and mark them as
4462    queued nowhere.  */
4463 
4464 static void
4465 unschedule_insns_until (rtx_insn *insn)
4466 {
4467   auto_vec<rtx_insn *> recompute_vec;
4468 
4469   /* Make two passes over the insns to be unscheduled.  First, we clear out
4470      dependencies and other trivial bookkeeping.  */
4471   for (;;)
4472     {
4473       rtx_insn *last;
4474       sd_iterator_def sd_it;
4475       dep_t dep;
4476 
4477       last = scheduled_insns.pop ();
4478 
4479       /* This will be changed by restore_backtrack_point if the insn is in
4480 	 any queue.  */
4481       QUEUE_INDEX (last) = QUEUE_NOWHERE;
4482       if (last != insn)
4483 	INSN_TICK (last) = INVALID_TICK;
4484 
4485       if (modulo_ii > 0 && INSN_UID (last) < modulo_iter0_max_uid)
4486 	modulo_insns_scheduled--;
4487 
4488       for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
4489 	   sd_iterator_cond (&sd_it, &dep);)
4490 	{
4491 	  rtx_insn *con = DEP_CON (dep);
4492 	  sd_unresolve_dep (sd_it);
4493 	  if (!MUST_RECOMPUTE_SPEC_P (con))
4494 	    {
4495 	      MUST_RECOMPUTE_SPEC_P (con) = 1;
4496 	      recompute_vec.safe_push (con);
4497 	    }
4498 	}
4499 
4500       if (last == insn)
4501 	break;
4502     }
4503 
4504   /* A second pass, to update ready and speculation status for insns
4505      depending on the unscheduled ones.  The first pass must have
4506      popped the scheduled_insns vector up to the point where we
4507      restart scheduling, as recompute_todo_spec requires it to be
4508      up-to-date.  */
4509   while (!recompute_vec.is_empty ())
4510     {
4511       rtx_insn *con;
4512 
4513       con = recompute_vec.pop ();
4514       MUST_RECOMPUTE_SPEC_P (con) = 0;
4515       if (!sd_lists_empty_p (con, SD_LIST_HARD_BACK))
4516 	{
4517 	  TODO_SPEC (con) = HARD_DEP;
4518 	  INSN_TICK (con) = INVALID_TICK;
4519 	  if (PREDICATED_PAT (con) != NULL_RTX)
4520 	    haifa_change_pattern (con, ORIG_PAT (con));
4521 	}
4522       else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
4523 	TODO_SPEC (con) = recompute_todo_spec (con, true);
4524     }
4525 }
4526 
4527 /* Restore scheduler state from the topmost entry on the backtracking queue.
4528    PSCHED_BLOCK_P points to the local data of schedule_block that we must
4529    overwrite with the saved data.
4530    The caller must already have called unschedule_insns_until.  */
4531 
4532 static void
4533 restore_last_backtrack_point (struct sched_block_state *psched_block)
4534 {
4535   int i;
4536   struct haifa_saved_data *save = backtrack_queue;
4537 
4538   backtrack_queue = save->next;
4539 
4540   if (current_sched_info->restore_state)
4541     (*current_sched_info->restore_state) (save->fe_saved_data);
4542 
4543   if (targetm.sched.alloc_sched_context)
4544     {
4545       targetm.sched.set_sched_context (save->be_saved_data);
4546       targetm.sched.free_sched_context (save->be_saved_data);
4547     }
4548 
4549   /* Do this first since it clobbers INSN_TICK of the involved
4550      instructions.  */
4551   undo_replacements_for_backtrack (save);
4552 
4553   /* Clear the QUEUE_INDEX of everything in the ready list or one
4554      of the queues.  */
4555   if (ready.n_ready > 0)
4556     {
4557       rtx_insn **first = ready_lastpos (&ready);
4558       for (i = 0; i < ready.n_ready; i++)
4559 	{
4560 	  rtx_insn *insn = first[i];
4561 	  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
4562 	  INSN_TICK (insn) = INVALID_TICK;
4563 	}
4564     }
4565   for (i = 0; i <= max_insn_queue_index; i++)
4566     {
4567       int q = NEXT_Q_AFTER (q_ptr, i);
4568 
4569       for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
4570 	{
4571 	  rtx_insn *x = link->insn ();
4572 	  QUEUE_INDEX (x) = QUEUE_NOWHERE;
4573 	  INSN_TICK (x) = INVALID_TICK;
4574 	}
4575       free_INSN_LIST_list (&insn_queue[q]);
4576     }
4577 
4578   free (ready.vec);
4579   ready = save->ready;
4580 
4581   if (ready.n_ready > 0)
4582     {
4583       rtx_insn **first = ready_lastpos (&ready);
4584       for (i = 0; i < ready.n_ready; i++)
4585 	{
4586 	  rtx_insn *insn = first[i];
4587 	  QUEUE_INDEX (insn) = QUEUE_READY;
4588 	  TODO_SPEC (insn) = recompute_todo_spec (insn, true);
4589 	  INSN_TICK (insn) = save->clock_var;
4590 	}
4591     }
4592 
4593   q_ptr = 0;
4594   q_size = save->q_size;
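  /* Reinstall the saved queue contents.  An insn in slot I of the circular
     queue becomes ready I cycles from now, so its tick is the saved clock
     plus I.  */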
4595   for (i = 0; i <= max_insn_queue_index; i++)
4596     {
4597       int q = NEXT_Q_AFTER (q_ptr, i);
4598 
4599       insn_queue[q] = save->insn_queue[q];
4600 
4601       for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
4602 	{
4603 	  rtx_insn *x = link->insn ();
4604 	  QUEUE_INDEX (x) = i;
4605 	  TODO_SPEC (x) = recompute_todo_spec (x, true);
4606 	  INSN_TICK (x) = save->clock_var + i;
4607 	}
4608     }
4609   free (save->insn_queue);
4610 
4611   toggle_cancelled_flags (true);
4612 
4613   clock_var = save->clock_var;
4614   last_clock_var = save->last_clock_var;
4615   cycle_issued_insns = save->cycle_issued_insns;
4616   last_scheduled_insn = save->last_scheduled_insn;
4617   last_nondebug_scheduled_insn = save->last_nondebug_scheduled_insn;
4618   nonscheduled_insns_begin = save->nonscheduled_insns_begin;
4619 
4620   *psched_block = save->sched_block;
4621 
4622   memcpy (curr_state, save->curr_state, dfa_state_size);
4623   free (save->curr_state);
4624 
4625   mark_backtrack_feeds (save->delay_pair->i2, 0);
4626 
4627   gcc_assert (next_cycle_replace_deps.is_empty ());
4628   next_cycle_replace_deps = save->next_cycle_deps.copy ();
4629   next_cycle_apply = save->next_cycle_apply.copy ();
4630 
4631   free (save);
4632 
4633   for (save = backtrack_queue; save; save = save->next)
4634     {
4635       mark_backtrack_feeds (save->delay_pair->i2, 1);
4636     }
4637 }
4638 
4639 /* Discard all data associated with the topmost entry in the backtrack
4640    queue.  If RESET_TICK is false, we just want to free the data.  If true,
4641    we are doing this because we discovered a reason to backtrack.  In the
4642    latter case, also reset the INSN_TICK for the shadow insn.  */
4643 static void
4644 free_topmost_backtrack_point (bool reset_tick)
4645 {
4646   struct haifa_saved_data *save = backtrack_queue;
4647   int i;
4648 
4649   backtrack_queue = save->next;
4650 
4651   if (reset_tick)
4652     {
4653       struct delay_pair *pair = save->delay_pair;
4654       while (pair)
4655 	{
4656 	  INSN_TICK (pair->i2) = INVALID_TICK;
4657 	  INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
4658 	  pair = pair->next_same_i1;
4659 	}
4660       undo_replacements_for_backtrack (save);
4661     }
4662   else
4663     {
4664       save->replacement_deps.release ();
4665       save->replace_apply.release ();
4666     }
4667 
4668   if (targetm.sched.free_sched_context)
4669     targetm.sched.free_sched_context (save->be_saved_data);
4670   if (current_sched_info->restore_state)
4671     free (save->fe_saved_data);
4672   for (i = 0; i <= max_insn_queue_index; i++)
4673     free_INSN_LIST_list (&save->insn_queue[i]);
4674   free (save->insn_queue);
4675   free (save->curr_state);
4676   free (save->ready.vec);
4677   free (save);
4678 }
4679 
4680 /* Free the entire backtrack queue.  */
4681 static void
4682 free_backtrack_queue (void)
4683 {
4684   while (backtrack_queue)
4685     free_topmost_backtrack_point (false);
4686 }
4687 
4688 /* Apply a replacement described by DESC.  If IMMEDIATELY is false, we
4689    may have to postpone the replacement until the start of the next cycle,
4690    at which point we will be called again with IMMEDIATELY true.  However,
4691    this is only done for machines which have instruction packets with
4692    explicit parallelism.  */
4693 static void
4694 apply_replacement (dep_t dep, bool immediately)
4695 {
4696   struct dep_replacement *desc = DEP_REPLACE (dep);
4697   if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4698     {
4699       next_cycle_replace_deps.safe_push (dep);
4700       next_cycle_apply.safe_push (1);
4701     }
4702   else
4703     {
4704       bool success;
4705 
4706       if (QUEUE_INDEX (desc->insn) == QUEUE_SCHEDULED)
4707 	return;
4708 
4709       if (sched_verbose >= 5)
4710 	fprintf (sched_dump, "applying replacement for insn %d\n",
4711 		 INSN_UID (desc->insn));
4712 
4713       success = validate_change (desc->insn, desc->loc, desc->newval, 0);
4714       gcc_assert (success);
4715 
4716       rtx_insn *insn = DEP_PRO (dep);
4717 
4718       /* Recompute priority since dependent priorities may have changed.  */
4719       priority (insn, true);
4720       update_insn_after_change (desc->insn);
4721 
4722       if ((TODO_SPEC (desc->insn) & (HARD_DEP | DEP_POSTPONED)) == 0)
4723 	fix_tick_ready (desc->insn);
4724 
4725       if (backtrack_queue != NULL)
4726 	{
4727 	  backtrack_queue->replacement_deps.safe_push (dep);
4728 	  backtrack_queue->replace_apply.safe_push (1);
4729 	}
4730     }
4731 }
4732 
4733 /* We have determined that a pattern involved in DEP must be restored.
4734    If IMMEDIATELY is false, we may have to postpone the replacement
4735    until the start of the next cycle, at which point we will be called
4736    again with IMMEDIATELY true.  */
4737 static void
4738 restore_pattern (dep_t dep, bool immediately)
4739 {
4740   rtx_insn *next = DEP_CON (dep);
4741   int tick = INSN_TICK (next);
4742 
4743   /* If we already scheduled the insn, the modified version is
4744      correct.  */
4745   if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
4746     return;
4747 
4748   if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4749     {
4750       next_cycle_replace_deps.safe_push (dep);
4751       next_cycle_apply.safe_push (0);
4752       return;
4753     }
4754 
4755 
4756   if (DEP_TYPE (dep) == REG_DEP_CONTROL)
4757     {
4758       if (sched_verbose >= 5)
4759 	fprintf (sched_dump, "restoring pattern for insn %d\n",
4760 		 INSN_UID (next));
4761       haifa_change_pattern (next, ORIG_PAT (next));
4762     }
4763   else
4764     {
4765       struct dep_replacement *desc = DEP_REPLACE (dep);
4766       bool success;
4767 
4768       if (sched_verbose >= 5)
4769 	fprintf (sched_dump, "restoring pattern for insn %d\n",
4770 		 INSN_UID (desc->insn));
4771       tick = INSN_TICK (desc->insn);
4772 
4773       success = validate_change (desc->insn, desc->loc, desc->orig, 0);
4774       gcc_assert (success);
4775 
4776       rtx_insn *insn = DEP_PRO (dep);
4777 
4778       if (QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
4779 	{
4780 	  /* Recompute priority since dependent priorities may have changed.  */
4781 	  priority (insn, true);
4782 	}
4783 
4784       update_insn_after_change (desc->insn);
4785 
4786       if (backtrack_queue != NULL)
4787 	{
4788 	  backtrack_queue->replacement_deps.safe_push (dep);
4789 	  backtrack_queue->replace_apply.safe_push (0);
4790 	}
4791     }
4792   INSN_TICK (next) = tick;
4793   if (TODO_SPEC (next) == DEP_POSTPONED)
4794     return;
4795 
4796   if (sd_lists_empty_p (next, SD_LIST_BACK))
4797     TODO_SPEC (next) = 0;
4798   else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
4799     TODO_SPEC (next) = HARD_DEP;
4800 }
4801 
4802 /* Perform pattern replacements that were queued up until the next
4803    cycle.  */
4804 static void
4805 perform_replacements_new_cycle (void)
4806 {
4807   int i;
4808   dep_t dep;
4809   FOR_EACH_VEC_ELT (next_cycle_replace_deps, i, dep)
4810     {
4811       int apply_p = next_cycle_apply[i];
4812       if (apply_p)
4813 	apply_replacement (dep, true);
4814       else
4815 	restore_pattern (dep, true);
4816     }
4817   next_cycle_replace_deps.truncate (0);
4818   next_cycle_apply.truncate (0);
4819 }
4820 
4821 /* Compute INSN_TICK_ESTIMATE for INSN.  PROCESSED is a bitmap of
4822    instructions we've previously encountered; a set bit prevents
4823    recursion.  BUDGET is a limit on how far ahead we look; it is
4824    reduced on recursive calls.  Return true if we produced a good
4825    estimate, or false if we exceeded the budget.  */
4826 static bool
4827 estimate_insn_tick (bitmap processed, rtx_insn *insn, int budget)
4828 {
4829   sd_iterator_def sd_it;
4830   dep_t dep;
4831   int earliest = INSN_TICK (insn);
4832 
4833   FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4834     {
4835       rtx_insn *pro = DEP_PRO (dep);
4836       int t;
4837 
4838       if (DEP_STATUS (dep) & DEP_CANCELLED)
4839 	continue;
4840 
4841       if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
4842 	gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
4843       else
4844 	{
4845 	  int cost = dep_cost (dep);
4846 	  if (cost >= budget)
4847 	    return false;
4848 	  if (!bitmap_bit_p (processed, INSN_LUID (pro)))
4849 	    {
4850 	      if (!estimate_insn_tick (processed, pro, budget - cost))
4851 		return false;
4852 	    }
4853 	  gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
4854 	  t = INSN_TICK_ESTIMATE (pro) + cost;
4855 	  if (earliest == INVALID_TICK || t > earliest)
4856 	    earliest = t;
4857 	}
4858     }
4859   bitmap_set_bit (processed, INSN_LUID (insn));
4860   INSN_TICK_ESTIMATE (insn) = earliest;
4861   return true;
4862 }
4863 
4864 /* Examine the pair of insns in P, and estimate (optimistically, assuming
4865    infinite resources) the cycle in which the delayed shadow can be issued.
4866    Return the number of cycles that must pass before the real insn can be
4867    issued in order to meet this constraint.  */
4868 static int
4869 estimate_shadow_tick (struct delay_pair *p)
4870 {
4871   auto_bitmap processed;
4872   int t;
4873   bool cutoff;
4874 
4875   cutoff = !estimate_insn_tick (processed, p->i2,
4876 				max_insn_queue_index + pair_delay (p));
4877   if (cutoff)
4878     return max_insn_queue_index;
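  /* The shadow is estimated to issue at INSN_TICK_ESTIMATE (p->i2), and the
     real insn must precede it by pair_delay (p) cycles.  The earliest
     opportunity is the next cycle, clock_var + 1; any excess beyond that is
     the required stall.  */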
4879   t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
4880   if (t > 0)
4881     return t;
4882   return 0;
4883 }
4884 
4885 /* If INSN has no unresolved backwards dependencies, add it to the schedule and
4886    recursively resolve all its forward dependencies.  */
4887 static void
4888 resolve_dependencies (rtx_insn *insn)
4889 {
4890   sd_iterator_def sd_it;
4891   dep_t dep;
4892 
4893   /* Don't use sd_lists_empty_p; it ignores debug insns.  */
4894   if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (insn)) != NULL
4895       || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (insn)) != NULL)
4896     return;
4897 
4898   if (sched_verbose >= 4)
4899     fprintf (sched_dump, ";;\tquickly resolving %d\n", INSN_UID (insn));
4900 
4901   if (QUEUE_INDEX (insn) >= 0)
4902     queue_remove (insn);
4903 
4904   scheduled_insns.safe_push (insn);
4905 
4906   /* Update dependent instructions.  */
4907   for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4908        sd_iterator_cond (&sd_it, &dep);)
4909     {
4910       rtx_insn *next = DEP_CON (dep);
4911 
4912       if (sched_verbose >= 4)
4913 	fprintf (sched_dump, ";;\t\tdep %d against %d\n", INSN_UID (insn),
4914 		 INSN_UID (next));
4915 
4916       /* Resolve the dependence between INSN and NEXT.
4917 	 sd_resolve_dep () moves current dep to another list thus
4918 	 advancing the iterator.  */
4919       sd_resolve_dep (sd_it);
4920 
4921       if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4922 	{
4923 	  resolve_dependencies (next);
4924 	}
4925       else
4926 	/* Check always has only one forward dependence (to the first insn in
4927 	   the recovery block), therefore, this will be executed only once.  */
4928 	{
4929 	  gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
4930 	}
4931     }
4932 }
4933 
4934 
4935 /* Return the head and tail pointers of ebb starting at BEG and ending
4936    at END.  */
4937 void
4938 get_ebb_head_tail (basic_block beg, basic_block end,
4939 		   rtx_insn **headp, rtx_insn **tailp)
4940 {
4941   rtx_insn *beg_head = BB_HEAD (beg);
4942   rtx_insn * beg_tail = BB_END (beg);
4943   rtx_insn * end_head = BB_HEAD (end);
4944   rtx_insn * end_tail = BB_END (end);
4945 
4946   /* Don't include any notes or labels at the beginning of the BEG
4947      basic block, or notes at the end of the END basic blocks.  */
4948 
4949   if (LABEL_P (beg_head))
4950     beg_head = NEXT_INSN (beg_head);
4951 
4952   while (beg_head != beg_tail)
4953     if (NOTE_P (beg_head))
4954       beg_head = NEXT_INSN (beg_head);
4955     else if (DEBUG_INSN_P (beg_head))
4956       {
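	/* BEG_HEAD is a debug insn.  Hoist any notes interleaved with the
	   leading debug insns to just before BEG_HEAD, so the region we
	   return does not include them.  */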
4957 	rtx_insn * note, *next;
4958 
4959 	for (note = NEXT_INSN (beg_head);
4960 	     note != beg_tail;
4961 	     note = next)
4962 	  {
4963 	    next = NEXT_INSN (note);
4964 	    if (NOTE_P (note))
4965 	      {
4966 		if (sched_verbose >= 9)
4967 		  fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
4968 
4969 		reorder_insns_nobb (note, note, PREV_INSN (beg_head));
4970 
4971 		if (BLOCK_FOR_INSN (note) != beg)
4972 		  df_insn_change_bb (note, beg);
4973 	      }
4974 	    else if (!DEBUG_INSN_P (note))
4975 	      break;
4976 	  }
4977 
4978 	break;
4979       }
4980     else
4981       break;
4982 
4983   *headp = beg_head;
4984 
4985   if (beg == end)
4986     end_head = beg_head;
4987   else if (LABEL_P (end_head))
4988     end_head = NEXT_INSN (end_head);
4989 
4990   while (end_head != end_tail)
4991     if (NOTE_P (end_tail))
4992       end_tail = PREV_INSN (end_tail);
4993     else if (DEBUG_INSN_P (end_tail))
4994       {
4995 	rtx_insn * note, *prev;
4996 
4997 	for (note = PREV_INSN (end_tail);
4998 	     note != end_head;
4999 	     note = prev)
5000 	  {
5001 	    prev = PREV_INSN (note);
5002 	    if (NOTE_P (note))
5003 	      {
5004 		if (sched_verbose >= 9)
5005 		  fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
5006 
5007 		reorder_insns_nobb (note, note, end_tail);
5008 
5009 		if (end_tail == BB_END (end))
5010 		  BB_END (end) = note;
5011 
5012 		if (BLOCK_FOR_INSN (note) != end)
5013 		  df_insn_change_bb (note, end);
5014 	      }
5015 	    else if (!DEBUG_INSN_P (note))
5016 	      break;
5017 	  }
5018 
5019 	break;
5020       }
5021     else
5022       break;
5023 
5024   *tailp = end_tail;
5025 }
5026 
5027 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */
5028 
5029 int
5030 no_real_insns_p (const rtx_insn *head, const rtx_insn *tail)
5031 {
5032   while (head != NEXT_INSN (tail))
5033     {
5034       if (!NOTE_P (head) && !LABEL_P (head))
5035 	return 0;
5036       head = NEXT_INSN (head);
5037     }
5038   return 1;
5039 }
5040 
5041 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
5042    previously found among the insns.  Insert them just before HEAD.  */
5043 rtx_insn *
5044 restore_other_notes (rtx_insn *head, basic_block head_bb)
5045 {
5046   if (note_list != 0)
5047     {
5048       rtx_insn *note_head = note_list;
5049 
5050       if (head)
5051 	head_bb = BLOCK_FOR_INSN (head);
5052       else
5053 	head = NEXT_INSN (bb_note (head_bb));
5054 
5055       while (PREV_INSN (note_head))
5056 	{
5057 	  set_block_for_insn (note_head, head_bb);
5058 	  note_head = PREV_INSN (note_head);
5059 	}
5060       /* The loop above stops just before this note; set its block too.  */
5061       set_block_for_insn (note_head, head_bb);
5062 
5063       SET_PREV_INSN (note_head) = PREV_INSN (head);
5064       SET_NEXT_INSN (PREV_INSN (head)) = note_head;
5065       SET_PREV_INSN (head) = note_list;
5066       SET_NEXT_INSN (note_list) = head;
5067 
5068       if (BLOCK_FOR_INSN (head) != head_bb)
5069 	BB_END (head_bb) = note_list;
5070 
5071       head = note_head;
5072     }
5073 
5074   return head;
5075 }
5076 
5077 /* When we know we are going to discard the schedule due to a failed attempt
5078    at modulo scheduling, undo all replacements.  */
5079 static void
5080 undo_all_replacements (void)
5081 {
5082   rtx_insn *insn;
5083   int i;
5084 
5085   FOR_EACH_VEC_ELT (scheduled_insns, i, insn)
5086     {
5087       sd_iterator_def sd_it;
5088       dep_t dep;
5089 
5090       /* See if we must undo a replacement.  */
5091       for (sd_it = sd_iterator_start (insn, SD_LIST_RES_FORW);
5092 	   sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
5093 	{
5094 	  struct dep_replacement *desc = DEP_REPLACE (dep);
5095 	  if (desc != NULL)
5096 	    validate_change (desc->insn, desc->loc, desc->orig, 0);
5097 	}
5098     }
5099 }
5100 
5101 /* Return first non-scheduled insn in the current scheduling block.
5102    This is mostly used for debug-counter purposes.  */
5103 static rtx_insn *
5104 first_nonscheduled_insn (void)
5105 {
5106   rtx_insn *insn = (nonscheduled_insns_begin != NULL_RTX
5107 		    ? nonscheduled_insns_begin
5108 		    : current_sched_info->prev_head);
5109 
5110   do
5111     {
5112       insn = next_nonnote_nondebug_insn (insn);
5113     }
5114   while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);
5115 
5116   return insn;
5117 }
5118 
5119 /* Move insns that became ready to fire from queue to ready list.  */
5120 
5121 static void
5122 queue_to_ready (struct ready_list *ready)
5123 {
5124   rtx_insn *insn;
5125   rtx_insn_list *link;
5126   rtx_insn *skip_insn;
5127 
5128   q_ptr = NEXT_Q (q_ptr);
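  /* Advance the round-robin queue pointer; the slot it now indexes holds
     the insns that become ready on this cycle.  */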
5129 
5130   if (dbg_cnt (sched_insn) == false)
5131     /* If the debug counter has been activated, do not requeue the first
5132        nonscheduled insn.  */
5133     skip_insn = first_nonscheduled_insn ();
5134   else
5135     skip_insn = NULL;
5136 
5137   /* Add all pending insns that can be scheduled without stalls to the
5138      ready list.  */
5139   for (link = insn_queue[q_ptr]; link; link = link->next ())
5140     {
5141       insn = link->insn ();
5142       q_size -= 1;
5143 
5144       if (sched_verbose >= 2)
5145 	fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
5146 		 (*current_sched_info->print_insn) (insn, 0));
5147 
5148       /* If the ready list is full, delay the insn for 1 cycle.
5149 	 See the comment in schedule_block for the rationale.  */
5150       if (!reload_completed
5151 	  && (ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
5152 	      || (sched_pressure == SCHED_PRESSURE_MODEL
5153 		  /* Limit pressure recalculations to MAX_SCHED_READY_INSNS
5154 		     instructions too.  */
5155 		  && model_index (insn) > (model_curr_point
5156 					   + MAX_SCHED_READY_INSNS)))
5157 	  && !(sched_pressure == SCHED_PRESSURE_MODEL
5158 	       && model_curr_point < model_num_insns
5159 	       /* Always allow the next model instruction to issue.  */
5160 	       && model_index (insn) == model_curr_point)
5161 	  && !SCHED_GROUP_P (insn)
5162 	  && insn != skip_insn)
5163 	{
5164 	  if (sched_verbose >= 2)
5165 	    fprintf (sched_dump, "keeping in queue, ready full\n");
5166 	  queue_insn (insn, 1, "ready full");
5167 	}
5168       else
5169 	{
5170 	  ready_add (ready, insn, false);
5171 	  if (sched_verbose >= 2)
5172 	    fprintf (sched_dump, "moving to ready without stalls\n");
5173         }
5174     }
5175   free_INSN_LIST_list (&insn_queue[q_ptr]);
5176 
5177   /* If there are no ready insns, stall until one is ready and add all
5178      of the pending insns at that point to the ready list.  */
5179   if (ready->n_ready == 0)
5180     {
5181       int stalls;
5182 
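      /* Find the first future queue slot that holds any insns; each empty
	 slot skipped along the way costs one stall cycle.  */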
5183       for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
5184 	{
5185 	  if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5186 	    {
5187 	      for (; link; link = link->next ())
5188 		{
5189 		  insn = link->insn ();
5190 		  q_size -= 1;
5191 
5192 		  if (sched_verbose >= 2)
5193 		    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
5194 			     (*current_sched_info->print_insn) (insn, 0));
5195 
5196 		  ready_add (ready, insn, false);
5197 		  if (sched_verbose >= 2)
5198 		    fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
5199 		}
5200 	      free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
5201 
5202 	      advance_one_cycle ();
5203 
5204 	      break;
5205 	    }
5206 
5207 	  advance_one_cycle ();
5208 	}
5209 
5210       q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
5211       clock_var += stalls;
5212       if (sched_verbose >= 2)
5213 	fprintf (sched_dump, ";;\tAdvancing clock by %d cycle[s] to %d\n",
5214 		 stalls, clock_var);
5215     }
5216 }
5217 
5218 /* Used by early_queue_to_ready.  Determines whether it is "ok" to
5219    prematurely move INSN from the queue to the ready list.  Currently,
5220    if a target defines the hook 'is_costly_dependence', this function
5221    uses the hook to check whether there exist any dependences which are
5222    considered costly by the target, between INSN and other insns that
5223    have already been scheduled.  Dependences are checked up to Y cycles
5224    back, with a default of Y=1; the flag -fsched-stalled-insns-dep=Y allows
5225    controlling this value.
5226    (Other considerations could be taken into account instead, or in
5227    addition, depending on user flags and target hooks.)  */
5228 
5229 static bool
5230 ok_for_early_queue_removal (rtx_insn *insn)
5231 {
5232   if (targetm.sched.is_costly_dependence)
5233     {
5234       int n_cycles;
5235       int i = scheduled_insns.length ();
5236       for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
5237 	{
5238 	  while (i-- > 0)
5239 	    {
5240 	      int cost;
5241 
5242 	      rtx_insn *prev_insn = scheduled_insns[i];
5243 
5244 	      if (!NOTE_P (prev_insn))
5245 		{
5246 		  dep_t dep;
5247 
5248 		  dep = sd_find_dep_between (prev_insn, insn, true);
5249 
5250 		  if (dep != NULL)
5251 		    {
5252 		      cost = dep_cost (dep);
5253 
5254 		      if (targetm.sched.is_costly_dependence (dep, cost,
5255 				flag_sched_stalled_insns_dep - n_cycles))
5256 			return false;
5257 		    }
5258 		}
5259 
5260 	      if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
5261 		break;
5262 	    }
5263 
5264 	  if (i == 0)
5265 	    break;
5266 	}
5267     }
5268 
5269   return true;
5270 }
5271 
5272 
5273 /* Remove insns from the queue, before they become "ready" with respect
5274    to FU latency considerations.  */
5275 
5276 static int
5277 early_queue_to_ready (state_t state, struct ready_list *ready)
5278 {
5279   rtx_insn *insn;
5280   rtx_insn_list *link;
5281   rtx_insn_list *next_link;
5282   rtx_insn_list *prev_link;
5283   bool move_to_ready;
5284   int cost;
5285   state_t temp_state = alloca (dfa_state_size);
5286   int stalls;
5287   int insns_removed = 0;
5288 
5289   /*
5290      Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
5291      function:
5292 
5293      X == 0: There is no limit on how many queued insns can be removed
5294              prematurely.  (flag_sched_stalled_insns = -1).
5295 
5296      X >= 1: Only X queued insns can be removed prematurely in each
5297 	     invocation.  (flag_sched_stalled_insns = X).
5298 
5299      Otherwise: Early queue removal is disabled.
5300          (flag_sched_stalled_insns = 0)
5301   */
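  /* For example, with -fsched-stalled-insns=2 at most two queued insns can
     be moved to the ready list per invocation, while X == 0 removes the
     limit and the loop below may drain every eligible queued insn.  */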
5302 
5303   if (! flag_sched_stalled_insns)
5304     return 0;
5305 
5306   for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
5307     {
5308       if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5309 	{
5310 	  if (sched_verbose > 6)
5311 	    fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
5312 
5313 	  prev_link = 0;
5314 	  while (link)
5315 	    {
5316 	      next_link = link->next ();
5317 	      insn = link->insn ();
5318 	      if (insn && sched_verbose > 6)
5319 		print_rtl_single (sched_dump, insn);
5320 
5321 	      memcpy (temp_state, state, dfa_state_size);
5322 	      if (recog_memoized (insn) < 0)
5323 		/* Use a non-negative cost to mark the insn as not yet ready,
5324 		   avoiding an infinite Q->R->Q->R... cycle.  */
5325 		cost = 0;
5326 	      else
5327 		cost = state_transition (temp_state, insn);
5328 
5329 	      if (sched_verbose >= 6)
5330 		fprintf (sched_dump, "transition cost = %d\n", cost);
5331 
5332 	      move_to_ready = false;
5333 	      if (cost < 0)
5334 		{
5335 		  move_to_ready = ok_for_early_queue_removal (insn);
5336 		  if (move_to_ready == true)
5337 		    {
5338 		      /* move from Q to R */
5339 		      q_size -= 1;
5340 		      ready_add (ready, insn, false);
5341 
5342 		      if (prev_link)
5343 			XEXP (prev_link, 1) = next_link;
5344 		      else
5345 			insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
5346 
5347 		      free_INSN_LIST_node (link);
5348 
5349 		      if (sched_verbose >= 2)
5350 			fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
5351 				 (*current_sched_info->print_insn) (insn, 0));
5352 
5353 		      insns_removed++;
5354 		      if (insns_removed == flag_sched_stalled_insns)
5355 			/* Remove no more than flag_sched_stalled_insns insns
5356 			   from Q at a time.  */
5357 			return insns_removed;
5358 		    }
5359 		}
5360 
5361 	      if (move_to_ready == false)
5362 		prev_link = link;
5363 
5364 	      link = next_link;
5365 	    } /* while link */
5366 	} /* if link */
5367 
5368     } /* for stalls.. */
5369 
5370   return insns_removed;
5371 }
5372 
5373 
5374 /* Print the ready list for debugging purposes.
5375    If READY_TRY is non-null then only print insns that max_issue
5376    will consider.  */
5377 static void
5378 debug_ready_list_1 (struct ready_list *ready, signed char *ready_try)
5379 {
5380   rtx_insn **p;
5381   int i;
5382 
5383   if (ready->n_ready == 0)
5384     {
5385       fprintf (sched_dump, "\n");
5386       return;
5387     }
5388 
5389   p = ready_lastpos (ready);
5390   for (i = 0; i < ready->n_ready; i++)
5391     {
5392       if (ready_try != NULL && ready_try[ready->n_ready - i - 1])
5393 	continue;
5394 
5395       fprintf (sched_dump, "  %s:%d",
5396 	       (*current_sched_info->print_insn) (p[i], 0),
5397 	       INSN_LUID (p[i]));
5398       if (sched_pressure != SCHED_PRESSURE_NONE)
5399 	fprintf (sched_dump, "(cost=%d",
5400 		 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
5401       fprintf (sched_dump, ":prio=%d", INSN_PRIORITY (p[i]));
5402       if (INSN_TICK (p[i]) > clock_var)
5403 	fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
5404       if (sched_pressure == SCHED_PRESSURE_MODEL)
5405 	fprintf (sched_dump, ":idx=%d",
5406 		 model_index (p[i]));
5407       if (sched_pressure != SCHED_PRESSURE_NONE)
5408 	fprintf (sched_dump, ")");
5409     }
5410   fprintf (sched_dump, "\n");
5411 }
5412 
5413 /* Print the ready list.  Callable from debugger.  */
5414 static void
5415 debug_ready_list (struct ready_list *ready)
5416 {
5417   debug_ready_list_1 (ready, NULL);
5418 }
5419 
5420 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
5421    NOTEs.  This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
5422    replaces the epilogue note in the correct basic block.  */
5423 void
5424 reemit_notes (rtx_insn *insn)
5425 {
5426   rtx note;
5427   rtx_insn *last = insn;
5428 
5429   for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
5430     {
5431       if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
5432 	{
5433 	  enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
5434 
5435 	  last = emit_note_before (note_type, last);
5436 	  remove_note (insn, note);
5437 	}
5438     }
5439 }
5440 
5441 /* Move INSN.  Reemit notes if needed.  Update CFG, if needed.  */
5442 static void
5443 move_insn (rtx_insn *insn, rtx_insn *last, rtx nt)
5444 {
5445   if (PREV_INSN (insn) != last)
5446     {
5447       basic_block bb;
5448       rtx_insn *note;
5449       int jump_p = 0;
5450 
5451       bb = BLOCK_FOR_INSN (insn);
5452 
5453       /* BB_HEAD is either LABEL or NOTE.  */
5454       gcc_assert (BB_HEAD (bb) != insn);
5455 
5456       if (BB_END (bb) == insn)
5457 	/* If this is last instruction in BB, move end marker one
5458 	   instruction up.  */
5459 	{
5460 	  /* Jumps are always placed at the end of basic block.  */
5461 	  jump_p = control_flow_insn_p (insn);
5462 
5463 	  gcc_assert (!jump_p
5464 		      || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
5465 			  && IS_SPECULATION_BRANCHY_CHECK_P (insn))
5466 		      || (common_sched_info->sched_pass_id
5467 			  == SCHED_EBB_PASS));
5468 
5469 	  gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
5470 
5471 	  BB_END (bb) = PREV_INSN (insn);
5472 	}
5473 
5474       gcc_assert (BB_END (bb) != last);
5475 
5476       if (jump_p)
5477 	/* We move the block note along with jump.  */
5478 	{
5479 	  gcc_assert (nt);
5480 
5481 	  note = NEXT_INSN (insn);
5482 	  while (NOTE_NOT_BB_P (note) && note != nt)
5483 	    note = NEXT_INSN (note);
5484 
5485 	  if (note != nt
5486 	      && (LABEL_P (note)
5487 		  || BARRIER_P (note)))
5488 	    note = NEXT_INSN (note);
5489 
5490 	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5491 	}
5492       else
5493 	note = insn;
5494 
5495       SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
5496       SET_PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
5497 
5498       SET_NEXT_INSN (note) = NEXT_INSN (last);
5499       SET_PREV_INSN (NEXT_INSN (last)) = note;
5500 
5501       SET_NEXT_INSN (last) = insn;
5502       SET_PREV_INSN (insn) = last;
5503 
5504       bb = BLOCK_FOR_INSN (last);
5505 
5506       if (jump_p)
5507 	{
5508 	  fix_jump_move (insn);
5509 
5510 	  if (BLOCK_FOR_INSN (insn) != bb)
5511 	    move_block_after_check (insn);
5512 
5513 	  gcc_assert (BB_END (bb) == last);
5514 	}
5515 
5516       df_insn_change_bb (insn, bb);
5517 
5518       /* Update BB_END, if needed.  */
5519       if (BB_END (bb) == last)
5520 	BB_END (bb) = insn;
5521     }
5522 
5523   SCHED_GROUP_P (insn) = 0;
5524 }
5525 
5526 /* Return true if scheduling INSN will finish current clock cycle.  */
5527 static bool
5528 insn_finishes_cycle_p (rtx_insn *insn)
5529 {
5530   if (SCHED_GROUP_P (insn))
5531     /* After issuing INSN, the rest of the sched_group will be forced to
5532        issue in order.  Don't make any plans for the rest of the cycle.  */
5533     return true;
5534 
5535   /* Finishing the block will, apparently, finish the cycle.  */
5536   if (current_sched_info->insn_finishes_block_p
5537       && current_sched_info->insn_finishes_block_p (insn))
5538     return true;
5539 
5540   return false;
5541 }
5542 
5543 /* Helper for autopref_multipass_init.  Given a SET in PAT and whether
5544    we're expecting a memory WRITE or not, check that the insn is relevant to
5545    the autoprefetcher modelling code.  Return true iff that is the case.
5546    If it is relevant, record the base register of the memory op in BASE and
5547    the offset in OFFSET.  */
5548 
5549 static bool
5550 analyze_set_insn_for_autopref (rtx pat, bool write, rtx *base, int *offset)
5551 {
5552   if (GET_CODE (pat) != SET)
5553     return false;
5554 
5555   rtx mem = write ? SET_DEST (pat) : SET_SRC (pat);
5556   if (!MEM_P (mem))
5557     return false;
5558 
5559   struct address_info info;
5560   decompose_mem_address (&info, mem);
5561 
5562   /* TODO: Currently only (base+const) addressing is supported.  */
5563   if (info.base == NULL || !REG_P (*info.base)
5564       || (info.disp != NULL && !CONST_INT_P (*info.disp)))
5565     return false;
5566 
5567   *base = *info.base;
5568   *offset = info.disp ? INTVAL (*info.disp) : 0;
5569   return true;
5570 }
5571 
5572 /* Functions to model cache auto-prefetcher.
5573 
5574    Some CPUs have a cache auto-prefetcher, which /seems/ to initiate
5575    memory prefetches if it sees instructions with consecutive memory accesses
5576    in the instruction stream.  Details of such hardware units are not published,
5577    so we can only guess what exactly is going on there.
5578    In the scheduler, we model an abstract auto-prefetcher.  If there are memory
5579    insns in the ready list (or the queue) that have the same memory base, but
5580    different offsets, then we delay the insns with larger offsets until insns
5581    with smaller offsets get scheduled.  If PARAM_SCHED_AUTOPREF_QUEUE_DEPTH
5582    is "1", then we look at the ready list; if it is N>1, then we also look
5583    through N-1 queue entries.
5584    If the param is N>=0, then rank_for_schedule will consider auto-prefetching
5585    among its heuristics.
5586    Param value of "-1" disables modelling of the auto-prefetcher.  */
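
/* For example (illustrative only): if a load from [r1+8] is in the ready
   list while a load from [r1+4] is still queued, the [r1+8] load is held
   back until the [r1+4] load has been scheduled, recreating the
   ascending-offset access pattern the hardware prefetcher is assumed to
   recognize.  */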
5587 
5588 /* Initialize autoprefetcher model data for INSN.  */
5589 static void
5590 autopref_multipass_init (const rtx_insn *insn, int write)
5591 {
5592   autopref_multipass_data_t data = &INSN_AUTOPREF_MULTIPASS_DATA (insn)[write];
5593 
5594   gcc_assert (data->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED);
5595   data->base = NULL_RTX;
5596   data->offset = 0;
5597   /* Set insn entry initialized, but not relevant for auto-prefetcher.  */
5598   data->status = AUTOPREF_MULTIPASS_DATA_IRRELEVANT;
5599 
5600   rtx pat = PATTERN (insn);
5601 
5602   /* We have a multi-set insn like a load-multiple or store-multiple.
5603      We care about these as long as all the memory ops inside the PARALLEL
5604      have the same base register.  We care about the minimum and maximum
5605      offsets from that base but don't check for the order of those offsets
5606      within the PARALLEL insn itself.  */
5607   if (GET_CODE (pat) == PARALLEL)
5608     {
5609       int n_elems = XVECLEN (pat, 0);
5610 
5611       int i, offset;
5612       rtx base, prev_base = NULL_RTX;
5613       int min_offset = INT_MAX;
5614 
5615       for (i = 0; i < n_elems; i++)
5616 	{
5617 	  rtx set = XVECEXP (pat, 0, i);
5618 	  if (GET_CODE (set) != SET)
5619 	    return;
5620 
5621 	  if (!analyze_set_insn_for_autopref (set, write, &base, &offset))
5622 	    return;
5623 
5624 	  /* Ensure that all memory operations in the PARALLEL use the same
5625 	     base register.  */
5626 	  if (i > 0 && REGNO (base) != REGNO (prev_base))
5627 	    return;
5628 	  prev_base = base;
5629 	  min_offset = MIN (min_offset, offset);
5630 	}
5631 
5632       /* If we reached here then we have a valid PARALLEL of multiple memory ops
5633 	 with prev_base as the base and min_offset containing the offset.  */
5634       gcc_assert (prev_base);
5635       data->base = prev_base;
5636       data->offset = min_offset;
5637       data->status = AUTOPREF_MULTIPASS_DATA_NORMAL;
5638       return;
5639     }
5640 
5641   /* Otherwise this is a single set memory operation.  */
5642   rtx set = single_set (insn);
5643   if (set == NULL_RTX)
5644     return;
5645 
5646   if (!analyze_set_insn_for_autopref (set, write, &data->base,
5647 				       &data->offset))
5648     return;
5649 
5650   /* This insn is relevant for the auto-prefetcher.
5651      The base and offset fields will have been filled in the
5652      analyze_set_insn_for_autopref call above.  */
5653   data->status = AUTOPREF_MULTIPASS_DATA_NORMAL;
5654 }
5655 
5656 /* Helper function for rank_for_schedule sorting.  */
5657 static int
5658 autopref_rank_for_schedule (const rtx_insn *insn1, const rtx_insn *insn2)
5659 {
5660   int r = 0;
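  /* Consider the read stream first, then the write stream; the first one
     that distinguishes the insns decides.  When both insns are relevant to
     the prefetcher, the difference of their offsets breaks the tie;
     otherwise relevance itself does.  */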
5661   for (int write = 0; write < 2 && !r; ++write)
5662     {
5663       autopref_multipass_data_t data1
5664 	= &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5665       autopref_multipass_data_t data2
5666 	= &INSN_AUTOPREF_MULTIPASS_DATA (insn2)[write];
5667 
5668       if (data1->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5669 	autopref_multipass_init (insn1, write);
5670 
5671       if (data2->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5672 	autopref_multipass_init (insn2, write);
5673 
5674       int irrel1 = data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT;
5675       int irrel2 = data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT;
5676 
5677       if (!irrel1 && !irrel2)
5678 	r = data1->offset - data2->offset;
5679       else
5680 	r = irrel2 - irrel1;
5681     }
5682 
5683   return r;
5684 }
5685 
5686 /* True if header of debug dump was printed.  */
5687 static bool autopref_multipass_dfa_lookahead_guard_started_dump_p;
5688 
5689 /* Helper for autopref_multipass_dfa_lookahead_guard.
5690    Return "1" if INSN1 should be delayed in favor of INSN2.  */
5691 static int
5692 autopref_multipass_dfa_lookahead_guard_1 (const rtx_insn *insn1,
5693 					  const rtx_insn *insn2, int write)
5694 {
5695   autopref_multipass_data_t data1
5696     = &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5697   autopref_multipass_data_t data2
5698     = &INSN_AUTOPREF_MULTIPASS_DATA (insn2)[write];
5699 
5700   if (data2->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5701     autopref_multipass_init (insn2, write);
5702   if (data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5703     return 0;
5704 
5705   if (rtx_equal_p (data1->base, data2->base)
5706       && data1->offset > data2->offset)
5707     {
5708       if (sched_verbose >= 2)
5709 	{
5710           if (!autopref_multipass_dfa_lookahead_guard_started_dump_p)
5711 	    {
5712 	      fprintf (sched_dump,
5713 		       ";;\t\tnot trying in max_issue due to autoprefetch "
5714 		       "model: ");
5715 	      autopref_multipass_dfa_lookahead_guard_started_dump_p = true;
5716 	    }
5717 
5718 	  fprintf (sched_dump, " %d(%d)", INSN_UID (insn1), INSN_UID (insn2));
5719 	}
5720 
5721       return 1;
5722     }
5723 
5724   return 0;
5725 }
5726 
5727 /* General note:
5728 
5729    We could have also hooked autoprefetcher model into
5730    first_cycle_multipass_backtrack / first_cycle_multipass_issue hooks
5731    to enable intelligent selection of "[r1+0]=r2; [r1+4]=r3" on the same cycle
5732    (e.g., once "[r1+0]=r2" is issued in max_issue(), "[r1+4]=r3" gets
5733    unblocked).  We don't bother about this yet because the target of interest
5734    (ARM Cortex-A15) can issue only 1 memory operation per cycle.  */
5735 
5736 /* Implementation of first_cycle_multipass_dfa_lookahead_guard hook.
5737    Return "1" if INSN1 should not be considered in max_issue due to
5738    auto-prefetcher considerations.  */
5739 int
5740 autopref_multipass_dfa_lookahead_guard (rtx_insn *insn1, int ready_index)
5741 {
5742   int r = 0;
5743 
5744   /* Exit early if the param forbids this or if we're not entering here through
5745      normal haifa scheduling.  This can happen if selective scheduling is
5746      explicitly enabled.  */
5747   if (!insn_queue || PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) <= 0)
5748     return 0;
5749 
5750   if (sched_verbose >= 2 && ready_index == 0)
5751     autopref_multipass_dfa_lookahead_guard_started_dump_p = false;
5752 
5753   for (int write = 0; write < 2; ++write)
5754     {
5755       autopref_multipass_data_t data1
5756 	= &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5757 
5758       if (data1->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5759 	autopref_multipass_init (insn1, write);
5760       if (data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5761 	continue;
5762 
5763       if (ready_index == 0
5764 	  && data1->status == AUTOPREF_MULTIPASS_DATA_DONT_DELAY)
5765 	/* We allow only a single delay on privileged instructions.
5766 	   Doing otherwise would cause an infinite loop.  */
5767 	{
5768 	  if (sched_verbose >= 2)
5769 	    {
5770 	      if (!autopref_multipass_dfa_lookahead_guard_started_dump_p)
5771 		{
5772 		  fprintf (sched_dump,
5773 			   ";;\t\tnot trying in max_issue due to autoprefetch "
5774 			   "model: ");
5775 		  autopref_multipass_dfa_lookahead_guard_started_dump_p = true;
5776 		}
5777 
5778 	      fprintf (sched_dump, " *%d*", INSN_UID (insn1));
5779 	    }
5780 	  continue;
5781 	}
5782 
5783       for (int i2 = 0; i2 < ready.n_ready; ++i2)
5784 	{
5785 	  rtx_insn *insn2 = get_ready_element (i2);
5786 	  if (insn1 == insn2)
5787 	    continue;
5788 	  r = autopref_multipass_dfa_lookahead_guard_1 (insn1, insn2, write);
5789 	  if (r)
5790 	    {
5791 	      if (ready_index == 0)
5792 		{
5793 		  r = -1;
5794 		  data1->status = AUTOPREF_MULTIPASS_DATA_DONT_DELAY;
5795 		}
5796 	      goto finish;
5797 	    }
5798 	}
5799 
5800       if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) == 1)
5801 	continue;
5802 
5803       /* Everything from the current queue slot should have been moved to
5804 	 the ready list.  */
5805       gcc_assert (insn_queue[NEXT_Q_AFTER (q_ptr, 0)] == NULL_RTX);
5806 
5807       int n_stalls = PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) - 1;
5808       if (n_stalls > max_insn_queue_index)
5809 	n_stalls = max_insn_queue_index;
5810 
5811       for (int stalls = 1; stalls <= n_stalls; ++stalls)
5812 	{
5813 	  for (rtx_insn_list *link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)];
5814 	       link != NULL_RTX;
5815 	       link = link->next ())
5816 	    {
5817 	      rtx_insn *insn2 = link->insn ();
5818 	      r = autopref_multipass_dfa_lookahead_guard_1 (insn1, insn2,
5819 							    write);
5820 	      if (r)
5821 		{
5822 		  /* Queue INSN1 until INSN2 can issue.  */
5823 		  r = -stalls;
5824 		  if (ready_index == 0)
5825 		    data1->status = AUTOPREF_MULTIPASS_DATA_DONT_DELAY;
5826 		  goto finish;
5827 		}
5828 	    }
5829 	}
5830     }
5831 
5832     finish:
5833   if (sched_verbose >= 2
5834       && autopref_multipass_dfa_lookahead_guard_started_dump_p
5835       && (ready_index == ready.n_ready - 1 || r < 0))
5836     /* This does not /always/ trigger.  We don't output EOL if the last
5837        insn is not recognized (INSN_CODE < 0) and lookahead_guard is not
5838        called.  We can live with this.  */
5839     fprintf (sched_dump, "\n");
5840 
5841   return r;
5842 }
5843 
5844 /* Define type for target data used in multipass scheduling.  */
5845 #ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
5846 # define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
5847 #endif
5848 typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;
5849 
5850 /* The following structure describes an entry of the stack of choices.  */
5851 struct choice_entry
5852 {
5853   /* Ordinal number of the issued insn in the ready queue.  */
5854   int index;
5855   /* The number of remaining insns whose issue we should still try.  */
5856   int rest;
5857   /* The number of issued essential insns.  */
5858   int n;
5859   /* State after issuing the insn.  */
5860   state_t state;
5861   /* Target-specific data.  */
5862   first_cycle_multipass_data_t target_data;
5863 };
5864 
5865 /* The following array is used to implement a stack of choices used in
5866    function max_issue.  */
5867 static struct choice_entry *choice_stack;
5868 
5869 /* This holds the value of the target dfa_lookahead hook.  */
5870 int dfa_lookahead;
5871 
5872 /* The following variable holds the maximal number of tries of issuing
5873    insns for the first cycle multipass insn scheduling.  We define
5874    this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
5875    need this constraint if all real insns (with non-negative codes)
5876    had reservations, because in that case the algorithm complexity is
5877    O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
5878    might be incomplete and such insns might occur.  For such
5879    descriptions, the complexity of the algorithm (without the constraint)
5880    could reach DFA_LOOKAHEAD ** N, where N is the queue length.  */
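
/* For example (purely illustrative values): with dfa_lookahead == 4 and
   issue_rate == 2, max_issue below initializes this limit to
   100 * 4 * 4 = 1600 tries.  */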
5881 static int max_lookahead_tries;
5882 
5883 /* The following function returns the maximal (or close to maximal) number
5884    of insns which can be issued on the same cycle, one of which is the
5885    insn with the best rank (the first insn in READY).  To do this, the
5886    function tries different samples of ready insns.  READY is the current
5887    queue `ready'.  The global array READY_TRY reflects which insns are
5888    already issued in this try.  The function stops immediately if it
5889    finds a solution in which all instructions can be issued.
5890    INDEX will contain the index of the best insn in READY.  This
5891    function is used only for first cycle multipass scheduling.
5892 
5893    PRIVILEGED_N >= 0
5894 
5895    This function expects recognized insns only.  All USEs,
5896    CLOBBERs, etc must be filtered elsewhere.  */
5897 int
5898 max_issue (struct ready_list *ready, int privileged_n, state_t state,
5899 	   bool first_cycle_insn_p, int *index)
5900 {
5901   int n, i, all, n_ready, best, delay, tries_num;
5902   int more_issue;
5903   struct choice_entry *top;
5904   rtx_insn *insn;
5905 
5906   if (sched_fusion)
5907     return 0;
5908 
5909   n_ready = ready->n_ready;
5910   gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
5911 	      && privileged_n <= n_ready);
5912 
5913   /* Init MAX_LOOKAHEAD_TRIES.  */
5914   if (max_lookahead_tries == 0)
5915     {
5916       max_lookahead_tries = 100;
5917       for (i = 0; i < issue_rate; i++)
5918 	max_lookahead_tries *= dfa_lookahead;
5919     }
5920 
5921   /* Init MORE_ISSUE: the number of insns we can still issue this cycle.  */
5922   more_issue = issue_rate - cycle_issued_insns;
5923   gcc_assert (more_issue >= 0);
5924 
5925   /* The number of the issued insns in the best solution.  */
5926   best = 0;
5927 
5928   top = choice_stack;
5929 
5930   /* Set initial state of the search.  */
5931   memcpy (top->state, state, dfa_state_size);
5932   top->rest = dfa_lookahead;
5933   top->n = 0;
5934   if (targetm.sched.first_cycle_multipass_begin)
5935     targetm.sched.first_cycle_multipass_begin (&top->target_data,
5936 					       ready_try, n_ready,
5937 					       first_cycle_insn_p);
5938 
5939   /* Count the number of the insns to search among.  */
5940   for (all = i = 0; i < n_ready; i++)
5941     if (!ready_try [i])
5942       all++;
5943 
5944   if (sched_verbose >= 2)
5945     {
5946       fprintf (sched_dump, ";;\t\tmax_issue among %d insns:", all);
5947       debug_ready_list_1 (ready, ready_try);
5948     }
5949 
5950   /* I is the index of the insn to try next.  */
5951   i = 0;
5952   tries_num = 0;
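  /* Depth-first search over issue choices: each choice_stack entry records
     one insn tentatively issued this cycle.  We extend the chain while the
     DFA accepts insns, and backtrack when the lookahead budget (REST), the
     ready list, or the cycle's issue slots are exhausted.  */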
5953   for (;;)
5954     {
5955       if (/* If we've reached a dead end or searched enough of what we have
5956 	     been asked...  */
5957 	  top->rest == 0
5958 	  /* or have nothing else to try...  */
5959 	  || i >= n_ready
5960 	  /* or should not issue more.  */
5961 	  || top->n >= more_issue)
5962 	{
5963 	  /* ??? (... || i == n_ready).  */
5964 	  gcc_assert (i <= n_ready);
5965 
5966 	  /* We should not issue more than issue_rate instructions.  */
5967 	  gcc_assert (top->n <= more_issue);
5968 
5969 	  if (top == choice_stack)
5970 	    break;
5971 
5972 	  if (best < top - choice_stack)
5973 	    {
5974 	      if (privileged_n)
5975 		{
5976 		  n = privileged_n;
5977 		  /* Try to find issued privileged insn.  */
5978 		  while (n && !ready_try[--n])
5979 		    ;
5980 		}
5981 
5982 	      if (/* If all insns are equally good...  */
5983 		  privileged_n == 0
5984 		  /* Or a privileged insn will be issued.  */
5985 		  || ready_try[n])
5986 		/* Then we have a solution.  */
5987 		{
5988 		  best = top - choice_stack;
5989 		  /* This is the index of the insn issued first in this
5990 		     solution.  */
5991 		  *index = choice_stack [1].index;
5992 		  if (top->n == more_issue || best == all)
5993 		    break;
5994 		}
5995 	    }
5996 
5997 	  /* Set ready-list index to point to the last insn
5998 	     ('i++' below will advance it to the next insn).  */
5999 	  i = top->index;
6000 
6001 	  /* Backtrack.  */
6002 	  ready_try [i] = 0;
6003 
6004 	  if (targetm.sched.first_cycle_multipass_backtrack)
6005 	    targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
6006 							   ready_try, n_ready);
6007 
6008 	  top--;
6009 	  memcpy (state, top->state, dfa_state_size);
6010 	}
6011       else if (!ready_try [i])
6012 	{
6013 	  tries_num++;
6014 	  if (tries_num > max_lookahead_tries)
6015 	    break;
6016 	  insn = ready_element (ready, i);
6017 	  delay = state_transition (state, insn);
6018 	  if (delay < 0)
6019 	    {
6020 	      if (state_dead_lock_p (state)
6021 		  || insn_finishes_cycle_p (insn))
6022 		/* We won't issue any more instructions in the next
6023 		   choice_state.  */
6024 		top->rest = 0;
6025 	      else
6026 		top->rest--;
6027 
6028 	      n = top->n;
6029 	      if (memcmp (top->state, state, dfa_state_size) != 0)
6030 		n++;
6031 
6032 	      /* Advance to the next choice_entry.  */
6033 	      top++;
6034 	      /* Initialize it.  */
6035 	      top->rest = dfa_lookahead;
6036 	      top->index = i;
6037 	      top->n = n;
6038 	      memcpy (top->state, state, dfa_state_size);
6039 	      ready_try [i] = 1;
6040 
6041 	      if (targetm.sched.first_cycle_multipass_issue)
6042 		targetm.sched.first_cycle_multipass_issue (&top->target_data,
6043 							   ready_try, n_ready,
6044 							   insn,
6045 							   &((top - 1)
6046 							     ->target_data));
6047 
6048 	      i = -1;
6049 	    }
6050 	}
6051 
6052       /* Increase ready-list index.  */
6053       i++;
6054     }
6055 
6056   if (targetm.sched.first_cycle_multipass_end)
6057     targetm.sched.first_cycle_multipass_end (best != 0
6058 					     ? &choice_stack[1].target_data
6059 					     : NULL);
6060 
6061   /* Restore the original state of the DFA.  */
6062   memcpy (state, choice_stack->state, dfa_state_size);
6063 
6064   return best;
6065 }
6066 
6067 /* The following function chooses an insn from READY and modifies
6068    READY.  It is used only for first cycle multipass
6069    scheduling.
6070    Return:
6071    -1 if cycle should be advanced,
6072    0 if INSN_PTR is set to point to the desirable insn,
6073    1 if choose_ready () should be restarted without advancing the cycle.  */
6074 static int
6075 choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
6076 	      rtx_insn **insn_ptr)
6077 {
6078   if (dbg_cnt (sched_insn) == false)
6079     {
6080       if (nonscheduled_insns_begin == NULL_RTX)
6081 	nonscheduled_insns_begin = current_sched_info->prev_head;
6082 
6083       rtx_insn *insn = first_nonscheduled_insn ();
6084 
6085       if (QUEUE_INDEX (insn) == QUEUE_READY)
6086 	/* INSN is in the ready_list.  */
6087 	{
6088 	  ready_remove_insn (insn);
6089 	  *insn_ptr = insn;
6090 	  return 0;
6091 	}
6092 
6093       /* INSN is in the queue.  Advance cycle to move it to the ready list.  */
6094       gcc_assert (QUEUE_INDEX (insn) >= 0);
6095       return -1;
6096     }
6097 
6098   if (dfa_lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
6099       || DEBUG_INSN_P (ready_element (ready, 0)))
6100     {
6101       if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
6102 	*insn_ptr = ready_remove_first_dispatch (ready);
6103       else
6104 	*insn_ptr = ready_remove_first (ready);
6105 
6106       return 0;
6107     }
6108   else
6109     {
6110       /* Try to choose the best insn.  */
6111       int index = 0, i;
6112       rtx_insn *insn;
6113 
6114       insn = ready_element (ready, 0);
6115       if (INSN_CODE (insn) < 0)
6116 	{
6117 	  *insn_ptr = ready_remove_first (ready);
6118 	  return 0;
6119 	}
6120 
6121       /* Filter the search space.  */
6122       for (i = 0; i < ready->n_ready; i++)
6123 	{
6124 	  ready_try[i] = 0;
6125 
6126 	  insn = ready_element (ready, i);
6127 
6128 	  /* If this insn is recognizable we should have already
6129 	     recognized it earlier.
6130 	     ??? Not very clear where this is supposed to be done.
6131 	     See dep_cost_1.  */
6132 	  gcc_checking_assert (INSN_CODE (insn) >= 0
6133 			       || recog_memoized (insn) < 0);
6134 	  if (INSN_CODE (insn) < 0)
6135 	    {
6136 	      /* Non-recognized insns at position 0 are handled above.  */
6137 	      gcc_assert (i > 0);
6138 	      ready_try[i] = 1;
6139 	      continue;
6140 	    }
6141 
6142 	  if (targetm.sched.first_cycle_multipass_dfa_lookahead_guard)
6143 	    {
6144 	      ready_try[i]
6145 		= (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
6146 		    (insn, i));
6147 
6148 	      if (ready_try[i] < 0)
6149 		/* Queue instruction for several cycles.
6150 		   We need to restart choose_ready as we have changed
6151 		   the ready list.  */
6152 		{
6153 		  change_queue_index (insn, -ready_try[i]);
6154 		  return 1;
6155 		}
6156 
6157 	      /* Make sure that we didn't end up with 0'th insn filtered out.
6158 		 Don't be tempted to make life easier for backends and just
6159 		 requeue 0'th insn if (ready_try[0] == 0) and restart
6160 		 choose_ready.  Backends should be very considerate about
6161 		 requeueing instructions -- especially the highest priority
6162 		 one at position 0.  */
6163 	      gcc_assert (ready_try[i] == 0 || i > 0);
6164 	      if (ready_try[i])
6165 		continue;
6166 	    }
6167 
6168 	  gcc_assert (ready_try[i] == 0);
6169 	  /* INSN made it through the scrutiny of filters!  */
6170 	}
6171 
6172       if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
6173 	{
6174 	  *insn_ptr = ready_remove_first (ready);
6175 	  if (sched_verbose >= 4)
6176 	    fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
6177                      (*current_sched_info->print_insn) (*insn_ptr, 0));
6178 	  return 0;
6179 	}
6180       else
6181 	{
6182 	  if (sched_verbose >= 4)
6183 	    fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
6184 		     (*current_sched_info->print_insn)
6185 		     (ready_element (ready, index), 0));
6186 
6187 	  *insn_ptr = ready_remove (ready, index);
6188 	  return 0;
6189 	}
6190     }
6191 }
6192 
6193 /* This function is called when we have successfully scheduled a
6194    block.  It uses the schedule stored in the scheduled_insns vector
6195    to rearrange the RTL.  PREV_HEAD is used as the anchor to which we
6196    append the scheduled insns; TAIL is the insn after the scheduled
6197    block.  TARGET_BB is the argument passed to schedule_block.  */
6198 
6199 static void
6200 commit_schedule (rtx_insn *prev_head, rtx_insn *tail, basic_block *target_bb)
6201 {
6202   unsigned int i;
6203   rtx_insn *insn;
6204 
6205   last_scheduled_insn = prev_head;
6206   for (i = 0;
6207        scheduled_insns.iterate (i, &insn);
6208        i++)
6209     {
6210       if (control_flow_insn_p (last_scheduled_insn)
6211 	  || current_sched_info->advance_target_bb (*target_bb, insn))
6212 	{
6213 	  *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);
6214 
6215 	  if (sched_verbose)
6216 	    {
6217 	      rtx_insn *x;
6218 
6219 	      x = next_real_insn (last_scheduled_insn);
6220 	      gcc_assert (x);
6221 	      dump_new_block_header (1, *target_bb, x, tail);
6222 	    }
6223 
6224 	  last_scheduled_insn = bb_note (*target_bb);
6225 	}
6226 
6227       if (current_sched_info->begin_move_insn)
6228 	(*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
6229       move_insn (insn, last_scheduled_insn,
6230 		 current_sched_info->next_tail);
6231       if (!DEBUG_INSN_P (insn))
6232 	reemit_notes (insn);
6233       last_scheduled_insn = insn;
6234     }
6235 
6236   scheduled_insns.truncate (0);
6237 }

/* Examine all insns on the ready list and queue those which can't be
   issued in this cycle.  TEMP_STATE is temporary scheduler state we
   can use as scratch space.  If FIRST_CYCLE_INSN_P is true, no insns
   have been issued for the current cycle, which means it is valid to
   issue an asm statement.

   If SHADOWS_ONLY_P is true, we eliminate all real insns and only
   leave those for which SHADOW_P is true.  If MODULO_EPILOGUE is true,
   we only leave insns which have an INSN_EXACT_TICK.  */

static void
prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
		  bool shadows_only_p, bool modulo_epilogue_p)
{
  int i, pass;
  bool sched_group_found = false;
  int min_cost_group = 0;

  if (sched_fusion)
    return;

  for (i = 0; i < ready.n_ready; i++)
    {
      rtx_insn *insn = ready_element (&ready, i);
      if (SCHED_GROUP_P (insn))
	{
	  sched_group_found = true;
	  break;
	}
    }

  /* Make two passes if there's a SCHED_GROUP_P insn; make sure to handle
     such an insn first and note its cost.  If at least one SCHED_GROUP_P insn
     gets queued, then all other insns get queued for one cycle later.  */
  for (pass = sched_group_found ? 0 : 1; pass < 2; )
    {
      int n = ready.n_ready;
      for (i = 0; i < n; i++)
	{
	  rtx_insn *insn = ready_element (&ready, i);
	  int cost = 0;
	  const char *reason = "resource conflict";

	  if (DEBUG_INSN_P (insn))
	    continue;

	  if (sched_group_found && !SCHED_GROUP_P (insn)
	      && ((pass == 0) || (min_cost_group >= 1)))
	    {
	      if (pass == 0)
		continue;
	      cost = min_cost_group;
	      reason = "not in sched group";
	    }
	  else if (modulo_epilogue_p
		   && INSN_EXACT_TICK (insn) == INVALID_TICK)
	    {
	      cost = max_insn_queue_index;
	      reason = "not an epilogue insn";
	    }
	  else if (shadows_only_p && !SHADOW_P (insn))
	    {
	      cost = 1;
	      reason = "not a shadow";
	    }
	  else if (recog_memoized (insn) < 0)
	    {
	      if (!first_cycle_insn_p
		  && (GET_CODE (PATTERN (insn)) == ASM_INPUT
		      || asm_noperands (PATTERN (insn)) >= 0))
		cost = 1;
	      reason = "asm";
	    }
	  else if (sched_pressure != SCHED_PRESSURE_NONE)
	    {
	      if (sched_pressure == SCHED_PRESSURE_MODEL
		  && INSN_TICK (insn) <= clock_var)
		{
		  memcpy (temp_state, curr_state, dfa_state_size);
		  if (state_transition (temp_state, insn) >= 0)
		    INSN_TICK (insn) = clock_var + 1;
		}
	      cost = 0;
	    }
	  else
	    {
	      int delay_cost = 0;

	      if (delay_htab)
		{
		  struct delay_pair *delay_entry;
		  delay_entry
		    = delay_htab->find_with_hash (insn,
						  htab_hash_pointer (insn));
		  while (delay_entry && delay_cost == 0)
		    {
		      delay_cost = estimate_shadow_tick (delay_entry);
		      if (delay_cost > max_insn_queue_index)
			delay_cost = max_insn_queue_index;
		      delay_entry = delay_entry->next_same_i1;
		    }
		}

	      memcpy (temp_state, curr_state, dfa_state_size);
	      cost = state_transition (temp_state, insn);
	      if (cost < 0)
		cost = 0;
	      else if (cost == 0)
		cost = 1;
	      if (cost < delay_cost)
		{
		  cost = delay_cost;
		  reason = "shadow tick";
		}
	    }
	  if (cost >= 1)
	    {
	      if (SCHED_GROUP_P (insn) && cost > min_cost_group)
		min_cost_group = cost;
	      ready_remove (&ready, i);
	      /* Normally we'd want to queue INSN for COST cycles.  However,
		 if SCHED_GROUP_P is set, then we must ensure that nothing
		 else comes between INSN and its predecessor.  If there is
		 some other insn ready to fire on the next cycle, then that
		 invariant would be broken.

		 So when SCHED_GROUP_P is set, just queue this insn for a
		 single cycle.  */
	      queue_insn (insn, SCHED_GROUP_P (insn) ? 1 : cost, reason);
	      if (i + 1 < n)
		break;
	    }
	}
      if (i == n)
	pass++;
    }
}
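
/* The group interaction above, reduced to a toy model; GROUPED, COST
   and toy_queue are hypothetical stand-ins for the real per-insn
   queries, and the non-group cost cases are collapsed into COST.
   Pass 0 queues group members that cannot issue yet and remembers the
   worst such cost; pass 1 then delays every non-member by that
   amount, so nothing can slip in between a queued group member and
   its predecessor.  */
#if 0
int min_cost_group = 0;

/* Pass 0: only SCHED_GROUP_P insns.  */
for (int i = 0; i < n_ready; i++)
  if (grouped[i] && cost[i] >= 1)
    {
      if (cost[i] > min_cost_group)
	min_cost_group = cost[i];
      toy_queue (i, 1);		/* group members wait a single cycle */
    }

/* Pass 1: everything else inherits MIN_COST_GROUP when it is set.  */
for (int i = 0; i < n_ready; i++)
  if (!grouped[i])
    {
      int c = min_cost_group >= 1 ? min_cost_group : cost[i];
      if (c >= 1)
	toy_queue (i, c);
    }
#endif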

/* Called when we detect that the schedule is impossible.  We examine the
   backtrack queue to find the earliest insn that caused this condition.  */

static struct haifa_saved_data *
verify_shadows (void)
{
  struct haifa_saved_data *save, *earliest_fail = NULL;
  for (save = backtrack_queue; save; save = save->next)
    {
      int t;
      struct delay_pair *pair = save->delay_pair;
      rtx_insn *i1 = pair->i1;

      for (; pair; pair = pair->next_same_i1)
	{
	  rtx_insn *i2 = pair->i2;

	  if (QUEUE_INDEX (i2) == QUEUE_SCHEDULED)
	    continue;

	  t = INSN_TICK (i1) + pair_delay (pair);
	  if (t < clock_var)
	    {
	      if (sched_verbose >= 2)
		fprintf (sched_dump,
			 ";;\t\tfailed delay requirements for %d/%d (%d->%d)"
			 ", not ready\n",
			 INSN_UID (pair->i1), INSN_UID (pair->i2),
			 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
	      earliest_fail = save;
	      break;
	    }
	  if (QUEUE_INDEX (i2) >= 0)
	    {
	      int queued_for = INSN_TICK (i2);

	      if (t < queued_for)
		{
		  if (sched_verbose >= 2)
		    fprintf (sched_dump,
			     ";;\t\tfailed delay requirements for %d/%d"
			     " (%d->%d), queued too late\n",
			     INSN_UID (pair->i1), INSN_UID (pair->i2),
			     INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
		  earliest_fail = save;
		  break;
		}
	    }
	}
    }

  return earliest_fail;
}
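
/* The per-pair requirement being verified, in isolation: for a delay
   pair (I1, I2) with minimum separation DELAY, I2 must issue at tick
   INSN_TICK (I1) + DELAY or later, and that tick must still be
   reachable.  A hypothetical standalone predicate over plain ints,
   with I2 assumed queued for tick TICK_I2:  */
#if 0
static bool
toy_pair_ok (int tick_i1, int delay, int tick_i2, int clock)
{
  int required = tick_i1 + delay;
  /* The two failure cases above: the required tick already lies in
     the past, or I2 is queued for a cycle before the required one
     can be honored.  */
  return required >= clock && required >= tick_i2;
}
#endif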

/* Print instructions together with useful scheduling information between
   HEAD and TAIL (inclusive).  */
static void
dump_insn_stream (rtx_insn *head, rtx_insn *tail)
{
  fprintf (sched_dump, ";;\t| insn | prio |\n");

  rtx_insn *next_tail = NEXT_INSN (tail);
  for (rtx_insn *insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      int priority = NOTE_P (insn) ? 0 : INSN_PRIORITY (insn);
      const char *pattern = (NOTE_P (insn)
			     ? "note"
			     : str_pattern_slim (PATTERN (insn)));

      fprintf (sched_dump, ";;\t| %4d | %4d | %-30s ",
	       INSN_UID (insn), priority, pattern);

      if (sched_verbose >= 4)
	{
	  if (NOTE_P (insn) || LABEL_P (insn) || recog_memoized (insn) < 0)
	    fprintf (sched_dump, "nothing");
	  else
	    print_reservation (sched_dump, insn);
	}
      fprintf (sched_dump, "\n");
    }
}

/* Use forward list scheduling to rearrange insns of block pointed to by
   TARGET_BB, possibly bringing insns from subsequent blocks in the same
   region.  */

bool
schedule_block (basic_block *target_bb, state_t init_state)
{
  int i;
  bool success = modulo_ii == 0;
  struct sched_block_state ls;
  state_t temp_state = NULL;  /* It is used for multipass scheduling.  */
  int sort_p, advance, start_clock_var;

  /* Head/tail info for this block.  */
  rtx_insn *prev_head = current_sched_info->prev_head;
  rtx_insn *next_tail = current_sched_info->next_tail;
  rtx_insn *head = NEXT_INSN (prev_head);
  rtx_insn *tail = PREV_INSN (next_tail);

  if ((current_sched_info->flags & DONT_BREAK_DEPENDENCIES) == 0
      && sched_pressure != SCHED_PRESSURE_MODEL && !sched_fusion)
    find_modifiable_mems (head, tail);

  /* We used to have code to avoid getting parameters moved from hard
     argument registers into pseudos.

     However, it was removed when it proved to be of marginal benefit
     and caused problems because schedule_block and compute_forward_dependences
     had different notions of what the "head" insn was.  */

  gcc_assert (head != tail || INSN_P (head));

  haifa_recovery_bb_recently_added_p = false;

  backtrack_queue = NULL;

  /* Debug info.  */
  if (sched_verbose)
    {
      dump_new_block_header (0, *target_bb, head, tail);

      if (sched_verbose >= 2)
	{
	  dump_insn_stream (head, tail);
	  memset (&rank_for_schedule_stats, 0,
		  sizeof (rank_for_schedule_stats));
	}
    }

  if (init_state == NULL)
    state_reset (curr_state);
  else
    memcpy (curr_state, init_state, dfa_state_size);

  /* Clear the ready list.  */
  ready.first = ready.veclen - 1;
  ready.n_ready = 0;
  ready.n_debug = 0;

  /* It is used for first cycle multipass scheduling.  */
  temp_state = alloca (dfa_state_size);

  if (targetm.sched.init)
    targetm.sched.init (sched_dump, sched_verbose, ready.veclen);

  /* We start inserting insns after PREV_HEAD.  */
  last_scheduled_insn = prev_head;
  last_nondebug_scheduled_insn = NULL;
  nonscheduled_insns_begin = NULL;

  gcc_assert ((NOTE_P (last_scheduled_insn)
	       || DEBUG_INSN_P (last_scheduled_insn))
	      && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);

  /* Initialize INSN_QUEUE.  Q_SIZE is the total number of insns in the
     queue.  */
  q_ptr = 0;
  q_size = 0;

  insn_queue = XALLOCAVEC (rtx_insn_list *, max_insn_queue_index + 1);
  memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));

  /* Start just before the beginning of time.  */
  clock_var = -1;

  /* We need the queue and ready lists and clock_var to be initialized
     in try_ready () (which is called through init_ready_list ()).  */
  (*current_sched_info->init_ready_list) ();

  if (sched_pressure)
    sched_pressure_start_bb (*target_bb);

  /* The algorithm is O(n^2) in the number of ready insns at any given
     time in the worst case.  Before reload we are more likely to have
     big lists, so truncate them to a reasonable size.  */
  if (!reload_completed
      && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
    {
      ready_sort_debug (&ready);
      ready_sort_real (&ready);

      /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
         If there are debug insns, we know they're first.  */
      for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
	if (!SCHED_GROUP_P (ready_element (&ready, i)))
	  break;

      if (sched_verbose >= 2)
	{
	  fprintf (sched_dump,
		   ";;\t\tReady list on entry: %d insns:  ", ready.n_ready);
	  debug_ready_list (&ready);
	  fprintf (sched_dump,
		   ";;\t\t before reload => truncated to %d insns\n", i);
	}

      /* Delay all insns past it for 1 cycle.  If the debug counter is
	 activated, make an exception for the insn right after
	 nonscheduled_insns_begin.  */
      {
	rtx_insn *skip_insn;

	if (dbg_cnt (sched_insn) == false)
	  skip_insn = first_nonscheduled_insn ();
	else
	  skip_insn = NULL;

	while (i < ready.n_ready)
	  {
	    rtx_insn *insn;

	    insn = ready_remove (&ready, i);

	    if (insn != skip_insn)
	      queue_insn (insn, 1, "list truncated");
	  }
	if (skip_insn)
	  ready_add (&ready, skip_insn, true);
      }
    }

  /* Now we can restore basic block notes and maintain precise cfg.  */
  restore_bb_notes (*target_bb);

  last_clock_var = -1;

  advance = 0;

  gcc_assert (scheduled_insns.length () == 0);
  sort_p = TRUE;
  must_backtrack = false;
  modulo_insns_scheduled = 0;

  ls.modulo_epilogue = false;
  ls.first_cycle_insn_p = true;

  /* Loop until all the insns in BB are scheduled.  */
  while ((*current_sched_info->schedule_more_p) ())
    {
      perform_replacements_new_cycle ();
      do
	{
	  start_clock_var = clock_var;

	  clock_var++;

	  advance_one_cycle ();

	  /* Add to the ready list all pending insns that can be issued now.
	     If there are no ready insns, increment clock until one
	     is ready and add all pending insns at that point to the ready
	     list.  */
	  queue_to_ready (&ready);

	  gcc_assert (ready.n_ready);

	  if (sched_verbose >= 2)
	    {
	      fprintf (sched_dump, ";;\t\tReady list after queue_to_ready:");
	      debug_ready_list (&ready);
	    }
	  advance -= clock_var - start_clock_var;
	}
      while (advance > 0);

      if (ls.modulo_epilogue)
	{
	  int stage = clock_var / modulo_ii;
	  if (stage > modulo_last_stage * 2 + 2)
	    {
	      if (sched_verbose >= 2)
		fprintf (sched_dump,
			 ";;\t\tmodulo schedule succeeded at II %d\n",
			 modulo_ii);
	      success = true;
	      goto end_schedule;
	    }
	}
      else if (modulo_ii > 0)
	{
	  int stage = clock_var / modulo_ii;
	  if (stage > modulo_max_stages)
	    {
	      if (sched_verbose >= 2)
		fprintf (sched_dump,
			 ";;\t\tfailing schedule due to excessive stages\n");
	      goto end_schedule;
	    }
	  if (modulo_n_insns == modulo_insns_scheduled
	      && stage > modulo_last_stage)
	    {
	      if (sched_verbose >= 2)
		fprintf (sched_dump,
			 ";;\t\tfound kernel after %d stages, II %d\n",
			 stage, modulo_ii);
	      ls.modulo_epilogue = true;
	    }
	}

      prune_ready_list (temp_state, true, false, ls.modulo_epilogue);
      if (ready.n_ready == 0)
	continue;
      if (must_backtrack)
	goto do_backtrack;

      ls.shadows_only_p = false;
      cycle_issued_insns = 0;
      ls.can_issue_more = issue_rate;
      for (;;)
	{
	  rtx_insn *insn;
	  int cost;
	  bool asm_p;

	  if (sort_p && ready.n_ready > 0)
	    {
	      /* Sort the ready list based on priority.  This must be
		 done every iteration through the loop, as schedule_insn
		 may have readied additional insns that will not be
		 sorted correctly.  */
	      ready_sort (&ready);

	      if (sched_verbose >= 2)
		{
		  fprintf (sched_dump,
			   ";;\t\tReady list after ready_sort:    ");
		  debug_ready_list (&ready);
		}
	    }

	  /* We don't want md sched reorder to even see debug insns, so put
	     them out right away.  */
	  if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
	      && (*current_sched_info->schedule_more_p) ())
	    {
	      while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
		{
		  rtx_insn *insn = ready_remove_first (&ready);
		  gcc_assert (DEBUG_INSN_P (insn));
		  (*current_sched_info->begin_schedule_ready) (insn);
		  scheduled_insns.safe_push (insn);
		  last_scheduled_insn = insn;
		  advance = schedule_insn (insn);
		  gcc_assert (advance == 0);
		  if (ready.n_ready > 0)
		    ready_sort (&ready);
		}
	    }

	  if (ls.first_cycle_insn_p && !ready.n_ready)
	    break;

	resume_after_backtrack:
	  /* Allow the target to reorder the list, typically for
	     better instruction bundling.  */
	  if (sort_p
	      && (ready.n_ready == 0
		  || !SCHED_GROUP_P (ready_element (&ready, 0))))
	    {
	      if (ls.first_cycle_insn_p && targetm.sched.reorder)
		ls.can_issue_more
		  = targetm.sched.reorder (sched_dump, sched_verbose,
					   ready_lastpos (&ready),
					   &ready.n_ready, clock_var);
	      else if (!ls.first_cycle_insn_p && targetm.sched.reorder2)
		ls.can_issue_more
		  = targetm.sched.reorder2 (sched_dump, sched_verbose,
					    ready.n_ready
					    ? ready_lastpos (&ready) : NULL,
					    &ready.n_ready, clock_var);
	    }

	restart_choose_ready:
	  if (sched_verbose >= 2)
	    {
	      fprintf (sched_dump, ";;\tReady list (t = %3d):  ",
		       clock_var);
	      debug_ready_list (&ready);
	      if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
		print_curr_reg_pressure ();
	    }

	  if (ready.n_ready == 0
	      && ls.can_issue_more
	      && reload_completed)
	    {
	      /* Allow scheduling insns directly from the queue in case
		 there's nothing better to do (ready list is empty) but
		 there are still vacant dispatch slots in the current cycle.  */
	      if (sched_verbose >= 6)
		fprintf (sched_dump, ";;\t\tSecond chance\n");
	      memcpy (temp_state, curr_state, dfa_state_size);
	      if (early_queue_to_ready (temp_state, &ready))
		ready_sort (&ready);
	    }

	  if (ready.n_ready == 0
	      || !ls.can_issue_more
	      || state_dead_lock_p (curr_state)
	      || !(*current_sched_info->schedule_more_p) ())
	    break;

	  /* Select and remove the insn from the ready list.  */
	  if (sort_p)
	    {
	      int res;

	      insn = NULL;
	      res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);

	      if (res < 0)
		/* Finish cycle.  */
		break;
	      if (res > 0)
		goto restart_choose_ready;

	      gcc_assert (insn != NULL_RTX);
	    }
	  else
	    insn = ready_remove_first (&ready);

	  if (sched_pressure != SCHED_PRESSURE_NONE
	      && INSN_TICK (insn) > clock_var)
	    {
	      ready_add (&ready, insn, true);
	      advance = 1;
	      break;
	    }

	  if (targetm.sched.dfa_new_cycle
	      && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
					      insn, last_clock_var,
					      clock_var, &sort_p))
	    /* SORT_P is used by the target to override sorting
	       of the ready list.  This is needed when the target
	       has modified its internal structures expecting that
	       the insn will be issued next.  As we need the insn
	       to have the highest priority (so it will be returned by
	       the ready_remove_first call above), we invoke
	       ready_add (&ready, insn, true).
	       But, still, there is one issue: INSN can be later
	       discarded by scheduler's front end through
	       current_sched_info->can_schedule_ready_p, hence, won't
	       be issued next.  */
	    {
	      ready_add (&ready, insn, true);
	      break;
	    }

	  sort_p = TRUE;

	  if (current_sched_info->can_schedule_ready_p
	      && ! (*current_sched_info->can_schedule_ready_p) (insn))
	    /* We normally get here only if we don't want to move
	       insn from the split block.  */
	    {
	      TODO_SPEC (insn) = DEP_POSTPONED;
	      goto restart_choose_ready;
	    }

	  if (delay_htab)
	    {
	      /* If this insn is the first part of a delay-slot pair, record a
		 backtrack point.  */
	      struct delay_pair *delay_entry;
	      delay_entry
		= delay_htab->find_with_hash (insn, htab_hash_pointer (insn));
	      if (delay_entry)
		{
		  save_backtrack_point (delay_entry, ls);
		  if (sched_verbose >= 2)
		    fprintf (sched_dump, ";;\t\tsaving backtrack point\n");
		}
	    }

	  /* DECISION is made.  */

	  if (modulo_ii > 0 && INSN_UID (insn) < modulo_iter0_max_uid)
	    {
	      modulo_insns_scheduled++;
	      modulo_last_stage = clock_var / modulo_ii;
	    }
	  if (TODO_SPEC (insn) & SPECULATIVE)
	    generate_recovery_code (insn);

	  if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
	    targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);

	  /* Update counters, etc. in the scheduler's front end.  */
	  (*current_sched_info->begin_schedule_ready) (insn);
	  scheduled_insns.safe_push (insn);
	  gcc_assert (NONDEBUG_INSN_P (insn));
	  last_nondebug_scheduled_insn = last_scheduled_insn = insn;

	  if (recog_memoized (insn) >= 0)
	    {
	      memcpy (temp_state, curr_state, dfa_state_size);
	      cost = state_transition (curr_state, insn);
	      if (sched_pressure != SCHED_PRESSURE_WEIGHTED && !sched_fusion)
		gcc_assert (cost < 0);
	      if (memcmp (temp_state, curr_state, dfa_state_size) != 0)
		cycle_issued_insns++;
	      asm_p = false;
	    }
	  else
	    asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
		     || asm_noperands (PATTERN (insn)) >= 0);

	  if (targetm.sched.variable_issue)
	    ls.can_issue_more =
	      targetm.sched.variable_issue (sched_dump, sched_verbose,
					    insn, ls.can_issue_more);
	  /* A naked CLOBBER or USE generates no instruction, so do
	     not count them against the issue rate.  */
	  else if (GET_CODE (PATTERN (insn)) != USE
		   && GET_CODE (PATTERN (insn)) != CLOBBER)
	    ls.can_issue_more--;
	  advance = schedule_insn (insn);

	  if (SHADOW_P (insn))
	    ls.shadows_only_p = true;

	  /* After issuing an asm insn we should start a new cycle.  */
	  if (advance == 0 && asm_p)
	    advance = 1;

	  if (must_backtrack)
	    break;

	  if (advance != 0)
	    break;

	  ls.first_cycle_insn_p = false;
	  if (ready.n_ready > 0)
	    prune_ready_list (temp_state, false, ls.shadows_only_p,
			      ls.modulo_epilogue);
	}

    do_backtrack:
      if (!must_backtrack)
	for (i = 0; i < ready.n_ready; i++)
	  {
	    rtx_insn *insn = ready_element (&ready, i);
	    if (INSN_EXACT_TICK (insn) == clock_var)
	      {
		must_backtrack = true;
		clock_var++;
		break;
	      }
	  }
      if (must_backtrack && modulo_ii > 0)
	{
	  if (modulo_backtracks_left == 0)
	    goto end_schedule;
	  modulo_backtracks_left--;
	}
      while (must_backtrack)
	{
	  struct haifa_saved_data *failed;
	  rtx_insn *failed_insn;

	  must_backtrack = false;
	  failed = verify_shadows ();
	  gcc_assert (failed);

	  failed_insn = failed->delay_pair->i1;
	  /* Clear these queues.  */
	  perform_replacements_new_cycle ();
	  toggle_cancelled_flags (false);
	  unschedule_insns_until (failed_insn);
	  while (failed != backtrack_queue)
	    free_topmost_backtrack_point (true);
	  restore_last_backtrack_point (&ls);
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, ";;\t\trewind to cycle %d\n", clock_var);
	  /* Delay by at least a cycle.  This could cause additional
	     backtracking.  */
	  queue_insn (failed_insn, 1, "backtracked");
	  advance = 0;
	  if (must_backtrack)
	    continue;
	  if (ready.n_ready > 0)
	    goto resume_after_backtrack;
	  else
	    {
	      if (clock_var == 0 && ls.first_cycle_insn_p)
		goto end_schedule;
	      advance = 1;
	      break;
	    }
	}
      ls.first_cycle_insn_p = true;
    }
  if (ls.modulo_epilogue)
    success = true;
 end_schedule:
  if (!ls.first_cycle_insn_p || advance)
    advance_one_cycle ();
  perform_replacements_new_cycle ();
  if (modulo_ii > 0)
    {
      /* Once again, debug insn suckiness: they can be on the ready list
	 even if they have unresolved dependencies.  To make our view
	 of the world consistent, remove such "ready" insns.  */
    restart_debug_insn_loop:
      for (i = ready.n_ready - 1; i >= 0; i--)
	{
	  rtx_insn *x;

	  x = ready_element (&ready, i);
	  if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (x)) != NULL
	      || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (x)) != NULL)
	    {
	      ready_remove (&ready, i);
	      goto restart_debug_insn_loop;
	    }
	}
      for (i = ready.n_ready - 1; i >= 0; i--)
	{
	  rtx_insn *x;

	  x = ready_element (&ready, i);
	  resolve_dependencies (x);
	}
      for (i = 0; i <= max_insn_queue_index; i++)
	{
	  rtx_insn_list *link;
	  while ((link = insn_queue[i]) != NULL)
	    {
	      rtx_insn *x = link->insn ();
	      insn_queue[i] = link->next ();
	      QUEUE_INDEX (x) = QUEUE_NOWHERE;
	      free_INSN_LIST_node (link);
	      resolve_dependencies (x);
	    }
	}
    }

  if (!success)
    undo_all_replacements ();

  /* Debug info.  */
  if (sched_verbose)
    {
      fprintf (sched_dump, ";;\tReady list (final):  ");
      debug_ready_list (&ready);
    }

  if (modulo_ii == 0 && current_sched_info->queue_must_finish_empty)
    /* Sanity check -- queue must be empty now.  Meaningless if region has
       multiple bbs.  */
    gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
  else if (modulo_ii == 0)
    {
      /* We must maintain QUEUE_INDEX between blocks in region.  */
      for (i = ready.n_ready - 1; i >= 0; i--)
	{
	  rtx_insn *x;

	  x = ready_element (&ready, i);
	  QUEUE_INDEX (x) = QUEUE_NOWHERE;
	  TODO_SPEC (x) = HARD_DEP;
	}

      if (q_size)
	for (i = 0; i <= max_insn_queue_index; i++)
	  {
	    rtx_insn_list *link;
	    for (link = insn_queue[i]; link; link = link->next ())
	      {
		rtx_insn *x;

		x = link->insn ();
		QUEUE_INDEX (x) = QUEUE_NOWHERE;
		TODO_SPEC (x) = HARD_DEP;
	      }
	    free_INSN_LIST_list (&insn_queue[i]);
	  }
    }

  if (sched_pressure == SCHED_PRESSURE_MODEL)
    model_end_schedule ();

  if (success)
    {
      commit_schedule (prev_head, tail, target_bb);
      if (sched_verbose)
	fprintf (sched_dump, ";;   total time = %d\n", clock_var);
    }
  else
    last_scheduled_insn = tail;

  scheduled_insns.truncate (0);

  if (!current_sched_info->queue_must_finish_empty
      || haifa_recovery_bb_recently_added_p)
    {
      /* INSN_TICK (minimum clock tick at which the insn becomes
         ready) may not be correct for insns in the subsequent
         blocks of the region.  We should use a correct value of
         `clock_var' or modify INSN_TICK.  It is better to keep
         clock_var value equal to 0 at the start of a basic block.
         Therefore we modify INSN_TICK here.  */
      fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
    }

  if (targetm.sched.finish)
    {
      targetm.sched.finish (sched_dump, sched_verbose);
      /* Target might have added some instructions to the scheduled block
	 in its md_finish () hook.  These new insns don't have any data
	 initialized, and to identify them we extend h_i_d so that they'll
	 get zero luids.  */
      sched_extend_luids ();
    }

  /* Update head/tail boundaries.  */
  head = NEXT_INSN (prev_head);
  tail = last_scheduled_insn;

  if (sched_verbose)
    {
      fprintf (sched_dump, ";;   new head = %d\n;;   new tail = %d\n",
	       INSN_UID (head), INSN_UID (tail));

      if (sched_verbose >= 2)
	{
	  dump_insn_stream (head, tail);
	  print_rank_for_schedule_stats (";; TOTAL ", &rank_for_schedule_stats,
					 NULL);
	}

      fprintf (sched_dump, "\n");
    }

  head = restore_other_notes (head, NULL);

  current_sched_info->head = head;
  current_sched_info->tail = tail;

  free_backtrack_queue ();

  return success;
}

/* Set_priorities: compute priority of each insn in the block.  */

int
set_priorities (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn;
  int n_insn;
  int sched_max_insns_priority =
	current_sched_info->sched_max_insns_priority;
  rtx_insn *prev_head;

  if (head == tail && ! INSN_P (head))
    gcc_unreachable ();

  n_insn = 0;

  prev_head = PREV_INSN (head);
  for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
    {
      if (!INSN_P (insn))
	continue;

      n_insn++;
      (void) priority (insn);

      gcc_assert (INSN_PRIORITY_KNOWN (insn));

      sched_max_insns_priority = MAX (sched_max_insns_priority,
				      INSN_PRIORITY (insn));
    }

  current_sched_info->sched_max_insns_priority = sched_max_insns_priority;

  return n_insn;
}
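
/* Conceptually, priority () computes the length of the longest
   dependence path from an insn to the end of the block, which is why
   the loop above walks from TAIL backwards.  A memoized version over
   a hypothetical adjacency-list DAG (not the pass's data structures,
   which store dependences on the insns themselves):  */
#if 0
struct toy_node
{
  int n_succs;		/* number of forward dependences */
  int *succ;		/* consumer indices */
  int *cost;		/* latency of each dependence */
  int prio;
  bool known;
};

static int
toy_priority (struct toy_node *g, int i)
{
  if (!g[i].known)
    {
      int best = 0;
      for (int k = 0; k < g[i].n_succs; k++)
	{
	  int p = toy_priority (g, g[i].succ[k]) + g[i].cost[k];
	  if (p > best)
	    best = p;
	}
      g[i].known = true;
      g[i].prio = best;
    }
  return g[i].prio;
}
#endif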

/* Set sched_dump and sched_verbose for the desired debugging output.  */
void
setup_sched_dump (void)
{
  sched_verbose = sched_verbose_param;
  sched_dump = dump_file;
  if (!dump_file)
    sched_verbose = 0;
}

/* Allocate data for register pressure sensitive scheduling.  */
static void
alloc_global_sched_pressure_data (void)
{
  if (sched_pressure != SCHED_PRESSURE_NONE)
    {
      int i, max_regno = max_reg_num ();

      if (sched_dump != NULL)
	/* We need info about pseudos for rtl dumps about pseudo
	   classes and costs.  */
	regstat_init_n_sets_and_refs ();
      ira_set_pseudo_classes (true, sched_verbose ? sched_dump : NULL);
      sched_regno_pressure_class
	= (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
      for (i = 0; i < max_regno; i++)
	sched_regno_pressure_class[i]
	  = (i < FIRST_PSEUDO_REGISTER
	     ? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
	     : ira_pressure_class_translate[reg_allocno_class (i)]);
      curr_reg_live = BITMAP_ALLOC (NULL);
      if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
	{
	  saved_reg_live = BITMAP_ALLOC (NULL);
	  region_ref_regs = BITMAP_ALLOC (NULL);
	}
      if (sched_pressure == SCHED_PRESSURE_MODEL)
	tmp_bitmap = BITMAP_ALLOC (NULL);

      /* Calculate number of CALL_SAVED_REGS and FIXED_REGS in register classes
	 that we calculate register pressure for.  */
      for (int c = 0; c < ira_pressure_classes_num; ++c)
	{
	  enum reg_class cl = ira_pressure_classes[c];

	  call_saved_regs_num[cl] = 0;
	  fixed_regs_num[cl] = 0;

	  for (int i = 0; i < ira_class_hard_regs_num[cl]; ++i)
	    if (!call_used_regs[ira_class_hard_regs[cl][i]])
	      ++call_saved_regs_num[cl];
	    else if (fixed_regs[ira_class_hard_regs[cl][i]])
	      ++fixed_regs_num[cl];
	}
    }
}

/* Free data for register pressure sensitive scheduling.  Also called
   from schedule_region when stopping sched-pressure early.  */
void
free_global_sched_pressure_data (void)
{
  if (sched_pressure != SCHED_PRESSURE_NONE)
    {
      if (regstat_n_sets_and_refs != NULL)
	regstat_free_n_sets_and_refs ();
      if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
	{
	  BITMAP_FREE (region_ref_regs);
	  BITMAP_FREE (saved_reg_live);
	}
      if (sched_pressure == SCHED_PRESSURE_MODEL)
	BITMAP_FREE (tmp_bitmap);
      BITMAP_FREE (curr_reg_live);
      free (sched_regno_pressure_class);
    }
}

/* Initialize some global state for the scheduler.  This function works
   with the common data shared between all the schedulers.  It is called
   from the scheduler specific initialization routine.  */

void
sched_init (void)
{
  /* Disable speculative loads if the target uses cc0.  */
  if (HAVE_cc0)
    flag_schedule_speculative_load = 0;

  if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
    targetm.sched.dispatch_do (NULL, DISPATCH_INIT);

  if (live_range_shrinkage_p)
    sched_pressure = SCHED_PRESSURE_WEIGHTED;
  else if (flag_sched_pressure
	   && !reload_completed
	   && common_sched_info->sched_pass_id == SCHED_RGN_PASS)
    sched_pressure = ((enum sched_pressure_algorithm)
		      PARAM_VALUE (PARAM_SCHED_PRESSURE_ALGORITHM));
  else
    sched_pressure = SCHED_PRESSURE_NONE;

  if (sched_pressure != SCHED_PRESSURE_NONE)
    ira_setup_eliminable_regset ();

  /* Initialize SPEC_INFO.  */
  if (targetm.sched.set_sched_flags)
    {
      spec_info = &spec_info_var;
      targetm.sched.set_sched_flags (spec_info);

      if (spec_info->mask != 0)
        {
          spec_info->data_weakness_cutoff =
            (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
          spec_info->control_weakness_cutoff =
            (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
             * REG_BR_PROB_BASE) / 100;
        }
      else
	/* So we won't read anything accidentally.  */
	spec_info = NULL;
    }
  else
    /* So we won't read anything accidentally.  */
    spec_info = 0;

  /* Initialize issue_rate.  */
  if (targetm.sched.issue_rate)
    issue_rate = targetm.sched.issue_rate ();
  else
    issue_rate = 1;

  if (targetm.sched.first_cycle_multipass_dfa_lookahead
      /* Don't use max_issue with reg_pressure scheduling.  Multipass
	 scheduling and reg_pressure scheduling undo each other's decisions.  */
      && sched_pressure == SCHED_PRESSURE_NONE)
    dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
  else
    dfa_lookahead = 0;

  /* Set to "0" so that we recalculate.  */
  max_lookahead_tries = 0;

  if (targetm.sched.init_dfa_pre_cycle_insn)
    targetm.sched.init_dfa_pre_cycle_insn ();

  if (targetm.sched.init_dfa_post_cycle_insn)
    targetm.sched.init_dfa_post_cycle_insn ();

  dfa_start ();
  dfa_state_size = state_size ();

  init_alias_analysis ();

  if (!sched_no_dce)
    df_set_flags (DF_LR_RUN_DCE);
  df_note_add_problem ();

  /* More problems needed for interloop dep calculation in SMS.  */
  if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
    {
      df_rd_add_problem ();
      df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
    }

  df_analyze ();

  /* Do not run DCE after reload, as this can kill nops inserted
     by bundling.  */
  if (reload_completed)
    df_clear_flags (DF_LR_RUN_DCE);

  regstat_compute_calls_crossed ();

  if (targetm.sched.init_global)
    targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);

  alloc_global_sched_pressure_data ();

  curr_state = xmalloc (dfa_state_size);
}

static void haifa_init_only_bb (basic_block, basic_block);

/* Initialize data structures specific to the Haifa scheduler.  */
void
haifa_sched_init (void)
{
  setup_sched_dump ();
  sched_init ();

  scheduled_insns.create (0);

  if (spec_info != NULL)
    {
      sched_deps_info->use_deps_list = 1;
      sched_deps_info->generate_spec_deps = 1;
    }

  /* Initialize luids, dependency caches, target and h_i_d for the
     whole function.  */
  {
    sched_init_bbs ();

    auto_vec<basic_block> bbs (n_basic_blocks_for_fn (cfun));
    basic_block bb;
    FOR_EACH_BB_FN (bb, cfun)
      bbs.quick_push (bb);
    sched_init_luids (bbs);
    sched_deps_init (true);
    sched_extend_target ();
    haifa_init_h_i_d (bbs);
  }

  sched_init_only_bb = haifa_init_only_bb;
  sched_split_block = sched_split_block_1;
  sched_create_empty_bb = sched_create_empty_bb_1;
  haifa_recovery_bb_ever_added_p = false;

  nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
  before_recovery = 0;
  after_recovery = 0;

  modulo_ii = 0;
}

/* Finish work with the data specific to the Haifa scheduler.  */
void
haifa_sched_finish (void)
{
  sched_create_empty_bb = NULL;
  sched_split_block = NULL;
  sched_init_only_bb = NULL;

  if (spec_info && spec_info->dump)
    {
      char c = reload_completed ? 'a' : 'b';

      fprintf (spec_info->dump,
	       ";; %s:\n", current_function_name ());

      fprintf (spec_info->dump,
               ";; Procedure %cr-begin-data-spec motions == %d\n",
               c, nr_begin_data);
      fprintf (spec_info->dump,
               ";; Procedure %cr-be-in-data-spec motions == %d\n",
               c, nr_be_in_data);
      fprintf (spec_info->dump,
               ";; Procedure %cr-begin-control-spec motions == %d\n",
               c, nr_begin_control);
      fprintf (spec_info->dump,
               ";; Procedure %cr-be-in-control-spec motions == %d\n",
               c, nr_be_in_control);
    }

  scheduled_insns.release ();

  /* Finalize h_i_d, dependency caches, and luids for the whole
     function.  Target will be finalized in md_global_finish ().  */
  sched_deps_finish ();
  sched_finish_luids ();
  current_sched_info = NULL;
  insn_queue = NULL;
  sched_finish ();
}

/* Free global data used during insn scheduling.  This function works with
   the common data shared between the schedulers.  */

void
sched_finish (void)
{
  haifa_finish_h_i_d ();
  free_global_sched_pressure_data ();
  free (curr_state);

  if (targetm.sched.finish_global)
    targetm.sched.finish_global (sched_dump, sched_verbose);

  end_alias_analysis ();

  regstat_free_calls_crossed ();

  dfa_finish ();
}

/* Free all delay_pair structures that were recorded.  */
void
free_delay_pairs (void)
{
  if (delay_htab)
    {
      delay_htab->empty ();
      delay_htab_i2->empty ();
    }
}

/* Fix INSN_TICKs of the instructions in the current block as well as
   INSN_TICKs of their dependents.
   HEAD and TAIL are the begin and the end of the current scheduled block.  */
static void
fix_inter_tick (rtx_insn *head, rtx_insn *tail)
{
  /* Set of instructions with corrected INSN_TICK.  */
  auto_bitmap processed;
  /* ??? It is doubtful if we should assume that cycle advance happens on
     basic block boundaries.  Basically, insns that are unconditionally ready
     at the start of the block are preferable to those which have a one cycle
     dependency on an insn from the previous block.  */
  int next_clock = clock_var + 1;

  /* Iterate over the scheduled instructions and fix their INSN_TICKs and
     the INSN_TICKs of dependent instructions, so that INSN_TICKs are
     consistent across different blocks.  */
  for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
    {
      if (INSN_P (head))
	{
	  int tick;
	  sd_iterator_def sd_it;
	  dep_t dep;

	  tick = INSN_TICK (head);
	  gcc_assert (tick >= MIN_TICK);

	  /* Fix INSN_TICK of instruction from just scheduled block.  */
	  if (bitmap_set_bit (processed, INSN_LUID (head)))
	    {
	      tick -= next_clock;

	      if (tick < MIN_TICK)
		tick = MIN_TICK;

	      INSN_TICK (head) = tick;
	    }

	  if (DEBUG_INSN_P (head))
	    continue;

	  FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
	    {
	      rtx_insn *next;

	      next = DEP_CON (dep);
	      tick = INSN_TICK (next);

	      if (tick != INVALID_TICK
		  /* If NEXT has its INSN_TICK calculated, fix it.
		     If not, it will be properly calculated from
		     scratch later in fix_tick_ready.  */
		  && bitmap_set_bit (processed, INSN_LUID (next)))
		{
		  tick -= next_clock;

		  if (tick < MIN_TICK)
		    tick = MIN_TICK;

		  if (tick > INTER_TICK (next))
		    INTER_TICK (next) = tick;
		  else
		    tick = INTER_TICK (next);

		  INSN_TICK (next) = tick;
		}
	    }
	}
    }
}
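
/* A worked example of the rebasing above, with hypothetical numbers:
   if the just-scheduled block finished at clock_var == 7, NEXT_CLOCK
   is 8; a dependent insn whose INSN_TICK was computed as 10 against
   the old block's clock becomes 10 - 8 = 2 against the next block's
   clock, which restarts from zero (and is clamped at MIN_TICK).  */
#if 0
int next_clock = 7 + 1;		/* clock_var + 1 at the block boundary */
int tick = 10 - next_clock;	/* 2: two cycles into the next block */
#endif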

/* Check if NEXT is ready to be added to the ready or queue list.
   If "yes", add it to the proper list.
   Returns:
      -1 - is not ready yet,
       0 - added to the ready list,
   0 < N - queued for N cycles.  */
int
try_ready (rtx_insn *next)
{
  ds_t old_ts, new_ts;

  old_ts = TODO_SPEC (next);

  gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP | DEP_CONTROL | DEP_POSTPONED))
	      && (old_ts == HARD_DEP
		  || old_ts == DEP_POSTPONED
		  || (old_ts & SPECULATIVE)
		  || old_ts == DEP_CONTROL));

  new_ts = recompute_todo_spec (next, false);

  if (new_ts & (HARD_DEP | DEP_POSTPONED))
    gcc_assert (new_ts == old_ts
		&& QUEUE_INDEX (next) == QUEUE_NOWHERE);
  else if (current_sched_info->new_ready)
    new_ts = current_sched_info->new_ready (next, new_ts);

  /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
     have its original pattern or changed (speculative) one.  This is due
     to changing ebb in region scheduling.
     * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
     has speculative pattern.

     We can't assert (!(new_ts & HARD_DEP) || new_ts == old_ts) here because
     control-speculative NEXT could have been discarded by sched-rgn.c
     (the same case as when discarded by can_schedule_ready_p ()).  */

  if ((new_ts & SPECULATIVE)
      /* If (old_ts == new_ts), then (old_ts & SPECULATIVE) and we don't
	 need to change anything.  */
      && new_ts != old_ts)
    {
      int res;
      rtx new_pat;

      gcc_assert ((new_ts & SPECULATIVE) && !(new_ts & ~SPECULATIVE));

      res = haifa_speculate_insn (next, new_ts, &new_pat);

      switch (res)
	{
	case -1:
	  /* It would be nice to change DEP_STATUS of all dependences,
	     which have ((DEP_STATUS & SPECULATIVE) == new_ts) to HARD_DEP,
	     so we won't reanalyze anything.  */
	  new_ts = HARD_DEP;
	  break;

	case 0:
	  /* We follow the rule that every speculative insn
	     has a non-null ORIG_PAT.  */
	  if (!ORIG_PAT (next))
	    ORIG_PAT (next) = PATTERN (next);
	  break;

	case 1:
	  if (!ORIG_PAT (next))
	    /* If we are going to overwrite the original pattern of insn,
	       save it.  */
	    ORIG_PAT (next) = PATTERN (next);

	  res = haifa_change_pattern (next, new_pat);
	  gcc_assert (res);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* We need to restore the pattern only if (new_ts == 0), because otherwise
     it is either correct (new_ts & SPECULATIVE),
     or we simply don't care (new_ts & HARD_DEP).  */

  gcc_assert (!ORIG_PAT (next)
	      || !IS_SPECULATION_BRANCHY_CHECK_P (next));

  TODO_SPEC (next) = new_ts;

  if (new_ts & (HARD_DEP | DEP_POSTPONED))
    {
      /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
	 control-speculative NEXT could have been discarded by sched-rgn.c
	 (the same case as when discarded by can_schedule_ready_p ()).  */
      /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/

      change_queue_index (next, QUEUE_NOWHERE);

      return -1;
    }
  else if (!(new_ts & BEGIN_SPEC)
	   && ORIG_PAT (next) && PREDICATED_PAT (next) == NULL_RTX
	   && !IS_SPECULATION_CHECK_P (next))
    /* We should change the pattern of every previously speculative
       instruction - and we determine if NEXT was speculative by using
       the ORIG_PAT field.  Except one case - speculation checks have
       ORIG_PAT too, so skip them.  */
    {
      bool success = haifa_change_pattern (next, ORIG_PAT (next));
      gcc_assert (success);
      ORIG_PAT (next) = 0;
    }

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
               (*current_sched_info->print_insn) (next, 0));

      if (spec_info && spec_info->dump)
        {
          if (new_ts & BEGIN_DATA)
            fprintf (spec_info->dump, "; data-spec;");
          if (new_ts & BEGIN_CONTROL)
            fprintf (spec_info->dump, "; control-spec;");
          if (new_ts & BE_IN_CONTROL)
            fprintf (spec_info->dump, "; in-control-spec;");
        }
      if (TODO_SPEC (next) & DEP_CONTROL)
	fprintf (sched_dump, " predicated");
      fprintf (sched_dump, "\n");
    }

  adjust_priority (next);

  return fix_tick_ready (next);
}
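
/* A hypothetical caller, showing the return convention documented
   above in use:  */
#if 0
int delay = try_ready (next);
if (delay < 0)
  ;	/* NEXT still has unresolved dependencies; it is nowhere.  */
else if (delay == 0)
  ;	/* NEXT was added to the ready list.  */
else
  ;	/* NEXT was queued to become ready in DELAY cycles.  */
#endif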

/* Calculate INSN_TICK of NEXT and add it to either ready or queue list.  */
static int
fix_tick_ready (rtx_insn *next)
{
  int tick, delay;

  if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
    {
      int full_p;
      sd_iterator_def sd_it;
      dep_t dep;

      tick = INSN_TICK (next);
      /* If TICK is not equal to INVALID_TICK, then update
	 INSN_TICK of NEXT with the most recent resolved dependence
	 cost.  Otherwise, recalculate from scratch.  */
      full_p = (tick == INVALID_TICK);

      FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
        {
          rtx_insn *pro = DEP_PRO (dep);
          int tick1;

	  gcc_assert (INSN_TICK (pro) >= MIN_TICK);

          tick1 = INSN_TICK (pro) + dep_cost (dep);
          if (tick1 > tick)
            tick = tick1;

	  if (!full_p)
	    break;
        }
    }
  else
    tick = -1;

  INSN_TICK (next) = tick;

  delay = tick - clock_var;
  if (delay <= 0 || sched_pressure != SCHED_PRESSURE_NONE || sched_fusion)
    delay = QUEUE_READY;

  change_queue_index (next, delay);

  return delay;
}
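
/* A worked example with hypothetical numbers: two resolved producers
   with INSN_TICKs 3 and 5 and dependence costs 2 and 1 give
   tick = MAX (3 + 2, 5 + 1) = 6.  With clock_var == 4 the insn is
   queued for 6 - 4 = 2 cycles; with clock_var >= 6 (or under pressure
   or fusion scheduling) it goes straight to the ready list.  */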

/* Move NEXT to the proper queue list with (DELAY >= 1),
   or add it to the ready list (DELAY == QUEUE_READY),
   or remove it from the ready and queue lists altogether
   (DELAY == QUEUE_NOWHERE).  */
static void
change_queue_index (rtx_insn *next, int delay)
{
  int i = QUEUE_INDEX (next);

  gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
	      && delay != 0);
  gcc_assert (i != QUEUE_SCHEDULED);

  if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
      || (delay < 0 && delay == i))
    /* We have nothing to do.  */
    return;

  /* Remove NEXT from wherever it is now.  */
  if (i == QUEUE_READY)
    ready_remove_insn (next);
  else if (i >= 0)
    queue_remove (next);

  /* Add it to the proper place.  */
  if (delay == QUEUE_READY)
    ready_add (readyp, next, false);
  else if (delay >= 1)
    queue_insn (next, delay, "change queue index");

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\ttick updated: insn %s",
	       (*current_sched_info->print_insn) (next, 0));

      if (delay == QUEUE_READY)
	fprintf (sched_dump, " into ready\n");
      else if (delay >= 1)
	fprintf (sched_dump, " into queue with cost=%d\n", delay);
      else
	fprintf (sched_dump, " removed from ready or queue lists\n");
    }
}

static int sched_ready_n_insns = -1;

/* Initialize per region data structures.  */
void
sched_extend_ready_list (int new_sched_ready_n_insns)
{
  int i;

  if (sched_ready_n_insns == -1)
    /* At the first call we need to initialize one more choice_stack
       entry.  */
    {
      i = 0;
      sched_ready_n_insns = 0;
      scheduled_insns.reserve (new_sched_ready_n_insns);
    }
  else
    i = sched_ready_n_insns + 1;

  ready.veclen = new_sched_ready_n_insns + issue_rate;
  ready.vec = XRESIZEVEC (rtx_insn *, ready.vec, ready.veclen);

  gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);

  ready_try = (signed char *) xrecalloc (ready_try, new_sched_ready_n_insns,
					 sched_ready_n_insns,
					 sizeof (*ready_try));

  /* We allocate +1 element to save initial state in the choice_stack[0]
     entry.  */
  choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
			     new_sched_ready_n_insns + 1);

  for (; i <= new_sched_ready_n_insns; i++)
    {
      choice_stack[i].state = xmalloc (dfa_state_size);

      if (targetm.sched.first_cycle_multipass_init)
	targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
						    .target_data));
    }

  sched_ready_n_insns = new_sched_ready_n_insns;
}

/* Free per region data structures.  */
void
sched_finish_ready_list (void)
{
  int i;

  free (ready.vec);
  ready.vec = NULL;
  ready.veclen = 0;

  free (ready_try);
  ready_try = NULL;

  for (i = 0; i <= sched_ready_n_insns; i++)
    {
      if (targetm.sched.first_cycle_multipass_fini)
	targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
						    .target_data));

      free (choice_stack[i].state);
    }
  free (choice_stack);
  choice_stack = NULL;

  sched_ready_n_insns = -1;
}

static int
haifa_luid_for_non_insn (rtx x)
{
  gcc_assert (NOTE_P (x) || LABEL_P (x));

  return 0;
}

/* Generates recovery code for INSN.  */
static void
generate_recovery_code (rtx_insn *insn)
{
  if (TODO_SPEC (insn) & BEGIN_SPEC)
    begin_speculative_block (insn);

  /* Here we have an insn with no dependencies on
     instructions other than CHECK_SPEC ones.  */

  if (TODO_SPEC (insn) & BE_IN_SPEC)
    add_to_speculative_block (insn);
}

/* Helper function.
   Tries to add speculative dependencies of type FS between TWIN and
   the consumers of INSN's forward dependencies.  */
static void
process_insn_forw_deps_be_in_spec (rtx_insn *insn, rtx_insn *twin, ds_t fs)
{
  sd_iterator_def sd_it;
  dep_t dep;

  FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
    {
      ds_t ds;
      rtx_insn *consumer;

      consumer = DEP_CON (dep);

      ds = DEP_STATUS (dep);

      if (/* If we want to create speculative dep.  */
	  fs
	  /* And we can do that because this is a true dep.  */
	  && (ds & DEP_TYPES) == DEP_TRUE)
	{
	  gcc_assert (!(ds & BE_IN_SPEC));

	  if (/* If this dep can be overcome with 'begin speculation'.  */
	      ds & BEGIN_SPEC)
	    /* Then we have a choice: keep the dep 'begin speculative'
	       or transform it into 'be in speculative'.  */
	    {
	      if (/* In try_ready we assert that if insn once became ready
		     it can be removed from the ready (or queue) list only
		     due to backend decision.  Hence we can't let the
		     probability of the speculative dep decrease.  */
		  ds_weak (ds) <= ds_weak (fs))
		{
		  ds_t new_ds;

		  new_ds = (ds & ~BEGIN_SPEC) | fs;

		  if (/* consumer can 'be in speculative'.  */
		      sched_insn_is_legitimate_for_speculation_p (consumer,
								  new_ds))
		    /* Transform it to be in speculative.  */
		    ds = new_ds;
		}
	    }
	  else
	    /* Mark the dep as 'be in speculative'.  */
	    ds |= fs;
	}

      {
	dep_def _new_dep, *new_dep = &_new_dep;

	init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
	sd_add_dep (new_dep, false);
      }
    }
}

/* Generates recovery code for BEGIN speculative INSN.  */
static void
begin_speculative_block (rtx_insn *insn)
{
  if (TODO_SPEC (insn) & BEGIN_DATA)
    nr_begin_data++;
  if (TODO_SPEC (insn) & BEGIN_CONTROL)
    nr_begin_control++;

  create_check_block_twin (insn, false);

  TODO_SPEC (insn) &= ~BEGIN_SPEC;
}

static void haifa_init_insn (rtx_insn *);

/* Generates recovery code for BE_IN speculative INSN.  */
static void
add_to_speculative_block (rtx_insn *insn)
{
  ds_t ts;
  sd_iterator_def sd_it;
  dep_t dep;
  auto_vec<rtx_insn *, 10> twins;

  ts = TODO_SPEC (insn);
  gcc_assert (!(ts & ~BE_IN_SPEC));

  if (ts & BE_IN_DATA)
    nr_be_in_data++;
  if (ts & BE_IN_CONTROL)
    nr_be_in_control++;

  TODO_SPEC (insn) &= ~BE_IN_SPEC;
  gcc_assert (!TODO_SPEC (insn));

  DONE_SPEC (insn) |= ts;

  /* First we convert all simple checks to branchy ones.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    {
      rtx_insn *check = DEP_PRO (dep);

      if (IS_SPECULATION_SIMPLE_CHECK_P (check))
	{
	  create_check_block_twin (check, true);

	  /* Restart search.  */
	  sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
	}
      else
	/* Continue search.  */
	sd_iterator_next (&sd_it);
    }

  auto_vec<rtx_insn *> priorities_roots;
  clear_priorities (insn, &priorities_roots);

  while (1)
    {
      rtx_insn *check, *twin;
      basic_block rec;

      /* Get the first backward dependency of INSN.  */
      sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
      if (!sd_iterator_cond (&sd_it, &dep))
	/* INSN has no backward dependencies left.  */
	break;

      gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
		  && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
		  && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);

      check = DEP_PRO (dep);

      gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
		  && QUEUE_INDEX (check) == QUEUE_NOWHERE);

      rec = BLOCK_FOR_INSN (check);

      twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
      haifa_init_insn (twin);

      sd_copy_back_deps (twin, insn, true);

      if (sched_verbose && spec_info->dump)
        /* INSN_BB (insn) isn't determined for twin insns yet.
           So we can't use current_sched_info->print_insn.  */
        fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
                 INSN_UID (twin), rec->index);

      twins.safe_push (twin);

      /* Add dependences between TWIN and all appropriate
	 instructions from REC.  */
      FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
	{
	  rtx_insn *pro = DEP_PRO (dep);

	  gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);

	  /* INSN might have dependencies on instructions from
	     several recovery blocks.  At this iteration we process those
	     producers that reside in REC.  */
	  if (BLOCK_FOR_INSN (pro) == rec)
	    {
	      dep_def _new_dep, *new_dep = &_new_dep;

	      init_dep (new_dep, pro, twin, REG_DEP_TRUE);
	      sd_add_dep (new_dep, false);
	    }
	}

      process_insn_forw_deps_be_in_spec (insn, twin, ts);

      /* Remove all dependencies between INSN and insns in REC.  */
      for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
	   sd_iterator_cond (&sd_it, &dep);)
	{
	  rtx_insn *pro = DEP_PRO (dep);

	  if (BLOCK_FOR_INSN (pro) == rec)
	    sd_delete_dep (sd_it);
	  else
	    sd_iterator_next (&sd_it);
	}
    }

  /* We couldn't have added the dependencies between INSN and TWINS earlier
     because that would make TWINS appear in the INSN_BACK_DEPS (INSN).  */
  unsigned int i;
  rtx_insn *twin;
  FOR_EACH_VEC_ELT_REVERSE (twins, i, twin)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
      sd_add_dep (new_dep, false);
    }

  calc_priorities (priorities_roots);
}
8057 
/* Extend the array pointed to by P, filling the new part with zeros.  */
void *
xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
{
  gcc_assert (new_nmemb >= old_nmemb);
  p = XRESIZEVAR (void, p, new_nmemb * size);
  memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
  return p;
}
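
/* Usage sketch (illustrative names, not from this file):

     arr = (int *) xrecalloc (arr, new_n, old_n, sizeof (int));

   grows ARR from OLD_N to NEW_N elements and guarantees that the
   elements in [OLD_N, NEW_N) are zero.  */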

/* Helper function.
   Find fallthru edge from PRED.  */
edge
find_fallthru_edge_from (basic_block pred)
{
  edge e;
  basic_block succ;

  succ = pred->next_bb;
  gcc_assert (succ->prev_bb == pred);

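  /* Scan whichever of the two edge lists is shorter: PRED's successors
     or SUCC's predecessors.  */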
  if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
    {
      e = find_fallthru_edge (pred->succs);

      if (e)
	{
	  gcc_assert (e->dest == succ || e->dest->index == EXIT_BLOCK);
	  return e;
	}
    }
  else
    {
      e = find_fallthru_edge (succ->preds);

      if (e)
	{
	  gcc_assert (e->src == pred);
	  return e;
	}
    }

  return NULL;
}

/* Extend per basic block data structures.  */
static void
sched_extend_bb (void)
{
  /* The following is done to keep current_sched_info->next_tail non null.  */
  rtx_insn *end = BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
  rtx_insn *insn = DEBUG_INSN_P (end) ? prev_nondebug_insn (end) : end;
  if (NEXT_INSN (end) == 0
      || (!NOTE_P (insn)
	  && !LABEL_P (insn)
	  /* Don't emit a NOTE if it would end up before a BARRIER.  */
	  && !BARRIER_P (next_nondebug_insn (end))))
    {
      rtx_note *note = emit_note_after (NOTE_INSN_DELETED, end);
      /* Make note appear outside BB.  */
      set_block_for_insn (note, NULL);
      BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb) = end;
    }
}

/* Init per basic block data structures.  */
void
sched_init_bbs (void)
{
  sched_extend_bb ();
}

/* Initialize BEFORE_RECOVERY variable.  */
static void
init_before_recovery (basic_block *before_recovery_ptr)
{
  basic_block last;
  edge e;

  last = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
  e = find_fallthru_edge_from (last);

  if (e)
    {
      /* We create two basic blocks:
         1. Single instruction block is inserted right after E->SRC
         and has jump to
         2. Empty block right before EXIT_BLOCK.
         Between these two blocks recovery blocks will be emitted.  */
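
      /* Schematically (an illustrative sketch):

	   last -> single --jump--> empty -> EXIT

	 SINGLE ends in the new jump to EMPTY, and EMPTY falls through
	 to EXIT.  */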

      basic_block single, empty;

      /* If the fallthrough edge to exit we've found is from the block we've
	 created before, don't do anything more.  */
      if (last == after_recovery)
	return;

      adding_bb_to_current_region_p = false;

      single = sched_create_empty_bb (last);
      empty = sched_create_empty_bb (single);

      /* Add new blocks to the root loop.  */
      if (current_loops != NULL)
	{
	  add_bb_to_loop (single, (*current_loops->larray)[0]);
	  add_bb_to_loop (empty, (*current_loops->larray)[0]);
	}

      single->count = last->count;
      empty->count = last->count;
      BB_COPY_PARTITION (single, last);
      BB_COPY_PARTITION (empty, last);

      redirect_edge_succ (e, single);
      make_single_succ_edge (single, empty, 0);
      make_single_succ_edge (empty, EXIT_BLOCK_PTR_FOR_FN (cfun),
			     EDGE_FALLTHRU);

      rtx_code_label *label = block_label (empty);
      rtx_jump_insn *x = emit_jump_insn_after (targetm.gen_jump (label),
					       BB_END (single));
      JUMP_LABEL (x) = label;
      LABEL_NUSES (label)++;
      haifa_init_insn (x);

      emit_barrier_after (x);

      sched_init_only_bb (empty, NULL);
      sched_init_only_bb (single, NULL);
      sched_extend_bb ();

      adding_bb_to_current_region_p = true;
      before_recovery = single;
      after_recovery = empty;

      if (before_recovery_ptr)
        *before_recovery_ptr = before_recovery;

      if (sched_verbose >= 2 && spec_info->dump)
        fprintf (spec_info->dump,
		 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
                 last->index, single->index, empty->index);
    }
  else
    before_recovery = last;
}

/* Returns new recovery block.  */
basic_block
sched_create_recovery_block (basic_block *before_recovery_ptr)
{
  rtx_insn *barrier;
  basic_block rec;

  haifa_recovery_bb_recently_added_p = true;
  haifa_recovery_bb_ever_added_p = true;

  init_before_recovery (before_recovery_ptr);

  barrier = get_last_bb_insn (before_recovery);
  gcc_assert (BARRIER_P (barrier));

  rtx_insn *label = emit_label_after (gen_label_rtx (), barrier);

  rec = create_basic_block (label, label, before_recovery);

  /* A recovery block always ends with an unconditional jump.  */
  emit_barrier_after (BB_END (rec));

  if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
    BB_SET_PARTITION (rec, BB_COLD_PARTITION);

  if (sched_verbose && spec_info->dump)
    fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
             rec->index);

  return rec;
}

/* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
   and emit necessary jumps.  */
void
sched_create_recovery_edges (basic_block first_bb, basic_block rec,
			     basic_block second_bb)
{
  int edge_flags;

  /* This fixes the incoming edge.  */
  /* ??? Which other flags should be specified?  */
  if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
    /* Partition type is the same, if it is "unpartitioned".  */
    edge_flags = EDGE_CROSSING;
  else
    edge_flags = 0;

  edge e2 = single_succ_edge (first_bb);
  edge e = make_edge (first_bb, rec, edge_flags);

  /* TODO: The actual probability can be determined and is computed as
     'todo_spec' variable in create_check_block_twin and
     in sel-sched.c `check_ds' in create_speculation_check.  */
  e->probability = profile_probability::very_unlikely ();
  rec->count = e->count ();
  e2->probability = e->probability.invert ();

  rtx_code_label *label = block_label (second_bb);
  rtx_jump_insn *jump = emit_jump_insn_after (targetm.gen_jump (label),
					      BB_END (rec));
  JUMP_LABEL (jump) = label;
  LABEL_NUSES (label)++;

  if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
    /* Partition type is the same, if it is "unpartitioned".  */
    {
      /* Rewritten from cfgrtl.c.  */
      if (crtl->has_bb_partition && targetm_common.have_named_sections)
	{
	  /* We don't need the same note for the check because
	     any_condjump_p (check) == true.  */
	  CROSSING_JUMP_P (jump) = 1;
	}
      edge_flags = EDGE_CROSSING;
    }
  else
    edge_flags = 0;

  make_single_succ_edge (rec, second_bb, edge_flags);
  if (dom_info_available_p (CDI_DOMINATORS))
    set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
}
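
/* A sketch of the CFG built above (illustrative):

       first_bb --(very unlikely)--> rec
	   |                          |
	   +------> second_bb <-------+

   The jump emitted at the end of REC targets SECOND_BB.  */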

/* This function creates recovery code for INSN.  If MUTATE_P is nonzero,
   INSN is a simple check that should be converted to a branchy one.  */
static void
create_check_block_twin (rtx_insn *insn, bool mutate_p)
{
  basic_block rec;
  rtx_insn *label, *check, *twin;
  rtx check_pat;
  ds_t fs;
  sd_iterator_def sd_it;
  dep_t dep;
  dep_def _new_dep, *new_dep = &_new_dep;
  ds_t todo_spec;

  gcc_assert (ORIG_PAT (insn) != NULL_RTX);

  if (!mutate_p)
    todo_spec = TODO_SPEC (insn);
  else
    {
      gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
		  && (TODO_SPEC (insn) & SPECULATIVE) == 0);

      todo_spec = CHECK_SPEC (insn);
    }

  todo_spec &= SPECULATIVE;

  /* Create recovery block.  */
  if (mutate_p || targetm.sched.needs_block_p (todo_spec))
    {
      rec = sched_create_recovery_block (NULL);
      label = BB_HEAD (rec);
    }
  else
    {
      rec = EXIT_BLOCK_PTR_FOR_FN (cfun);
      label = NULL;
    }

  /* Emit CHECK.  */
  check_pat = targetm.sched.gen_spec_check (insn, label, todo_spec);

  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      /* To have mem_reg alive at the beginning of second_bb,
	 we emit the check BEFORE insn, so that after splitting,
	 insn will be at the beginning of second_bb, which will
	 provide us with the correct life information.  */
      check = emit_jump_insn_before (check_pat, insn);
      JUMP_LABEL (check) = label;
      LABEL_NUSES (label)++;
    }
  else
    check = emit_insn_before (check_pat, insn);

  /* Extend data structures.  */
  haifa_init_insn (check);

  /* CHECK is being added to current region.  Extend ready list.  */
  gcc_assert (sched_ready_n_insns != -1);
  sched_extend_ready_list (sched_ready_n_insns + 1);

  if (current_sched_info->add_remove_insn)
    current_sched_info->add_remove_insn (insn, 0);

  RECOVERY_BLOCK (check) = rec;

  if (sched_verbose && spec_info->dump)
    fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
             (*current_sched_info->print_insn) (check, 0));

  gcc_assert (ORIG_PAT (insn));

  /* Initialize TWIN (twin is a duplicate of original instruction
     in the recovery block).  */
  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      sd_iterator_def sd_it;
      dep_t dep;

      FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
	if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
	  {
	    struct _dep _dep2, *dep2 = &_dep2;

	    init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);

	    sd_add_dep (dep2, true);
	  }

      twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
      haifa_init_insn (twin);

      if (sched_verbose && spec_info->dump)
	/* INSN_BB (insn) isn't determined for twin insns yet.
	   So we can't use current_sched_info->print_insn.  */
	fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
		 INSN_UID (twin), rec->index);
    }
  else
    {
      ORIG_PAT (check) = ORIG_PAT (insn);
      HAS_INTERNAL_DEP (check) = 1;
      twin = check;
      /* ??? We probably should change all OUTPUT dependencies to
	 (TRUE | OUTPUT).  */
    }

  /* Copy all resolved back dependencies of INSN to TWIN.  This will
     provide correct value for INSN_TICK (TWIN).  */
  sd_copy_back_deps (twin, insn, true);

  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    /* In case of branchy check, fix CFG.  */
    {
      basic_block first_bb, second_bb;
      rtx_insn *jump;

      first_bb = BLOCK_FOR_INSN (check);
      second_bb = sched_split_block (first_bb, check);

      sched_create_recovery_edges (first_bb, rec, second_bb);

      sched_init_only_bb (second_bb, first_bb);
      sched_init_only_bb (rec, EXIT_BLOCK_PTR_FOR_FN (cfun));

      jump = BB_END (rec);
      haifa_init_insn (jump);
    }

  /* Move backward dependences from INSN to CHECK and
     move forward dependences from INSN to TWIN.  */

  /* First, create dependencies between INSN's producers and CHECK & TWIN.  */
  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      ds_t ds;

      /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
	 check --TRUE--> producer  ??? or ANTI ???
	 twin  --TRUE--> producer
	 twin  --ANTI--> check

	 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
	 check --ANTI--> producer
	 twin  --ANTI--> producer
	 twin  --ANTI--> check

	 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
	 check ~~TRUE~~> producer
	 twin  ~~TRUE~~> producer
	 twin  --ANTI--> check  */

      ds = DEP_STATUS (dep);

      if (ds & BEGIN_SPEC)
	{
	  gcc_assert (!mutate_p);
	  ds &= ~BEGIN_SPEC;
	}

      init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
      sd_add_dep (new_dep, false);

      if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
	{
	  DEP_CON (new_dep) = twin;
	  sd_add_dep (new_dep, false);
	}
    }

  /* Second, remove backward dependencies of INSN.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    {
      if ((DEP_STATUS (dep) & BEGIN_SPEC)
	  || mutate_p)
	/* We can delete this dep because we overcome it with
	   BEGIN_SPECULATION.  */
	sd_delete_dep (sd_it);
      else
	sd_iterator_next (&sd_it);
    }

  /* Future Speculations.  Determine what BE_IN speculations will be like.  */
  fs = 0;

  /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
     here.  */

  gcc_assert (!DONE_SPEC (insn));

  if (!mutate_p)
    {
      ds_t ts = TODO_SPEC (insn);

      DONE_SPEC (insn) = ts & BEGIN_SPEC;
      CHECK_SPEC (check) = ts & BEGIN_SPEC;

      /* The success probability of future speculations depends solely
	 upon the initial BEGIN speculation.  */
      if (ts & BEGIN_DATA)
	fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
      if (ts & BEGIN_CONTROL)
	fs = set_dep_weak (fs, BE_IN_CONTROL,
			   get_dep_weak (ts, BEGIN_CONTROL));
    }
  else
    CHECK_SPEC (check) = CHECK_SPEC (insn);

  /* Future speculations: call the helper.  */
  process_insn_forw_deps_be_in_spec (insn, twin, fs);

  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      /* Which types of dependencies we should use here is, generally,
	 a machine-dependent question...  But, for now,
	 it is not.  */

      if (!mutate_p)
	{
	  init_dep (new_dep, insn, check, REG_DEP_TRUE);
	  sd_add_dep (new_dep, false);

	  init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
	  sd_add_dep (new_dep, false);
	}
      else
	{
	  if (spec_info->dump)
	    fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
		     (*current_sched_info->print_insn) (insn, 0));

	  /* Remove all dependencies of the INSN.  */
	  {
	    sd_it = sd_iterator_start (insn, (SD_LIST_FORW
					      | SD_LIST_BACK
					      | SD_LIST_RES_BACK));
	    while (sd_iterator_cond (&sd_it, &dep))
	      sd_delete_dep (sd_it);
	  }

	  /* If former check (INSN) already was moved to the ready (or queue)
	     list, add new check (CHECK) there too.  */
	  if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
	    try_ready (check);

	  /* Remove old check from instruction stream and free its
	     data.  */
	  sched_remove_insn (insn);
	}

      init_dep (new_dep, check, twin, REG_DEP_ANTI);
      sd_add_dep (new_dep, false);
    }
  else
    {
      init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
      sd_add_dep (new_dep, false);
    }

  if (!mutate_p)
    /* Fix priorities.  If MUTATE_P is nonzero, this is not necessary,
       because it'll be done later in add_to_speculative_block.  */
    {
      auto_vec<rtx_insn *> priorities_roots;

      clear_priorities (twin, &priorities_roots);
      calc_priorities (priorities_roots);
    }
}
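
/* A sketch of the transformation above for a branchy check (illustrative;
   the exact check pattern comes from targetm.sched.gen_spec_check):

     before:  insn:  r1 = <speculative op>

     after:   check: r1 = <speculative form>, branch to REC on failure
	      ...
	      rec:   twin: r1 = <original pattern>
		     jump back

   For a simple (non-branchy) check no recovery block is created and
   CHECK itself plays the role of TWIN.  */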

/* Removes dependencies between instructions in the recovery block REC
   and the usual region instructions.  It keeps inner dependences so it
   won't be necessary to recompute them.  */
static void
fix_recovery_deps (basic_block rec)
{
  rtx_insn *note, *insn, *jump;
  auto_vec<rtx_insn *, 10> ready_list;
  auto_bitmap in_ready;

  /* NOTE - a basic block note.  */
  note = NEXT_INSN (BB_HEAD (rec));
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  insn = BB_END (rec);
  gcc_assert (JUMP_P (insn));
  insn = PREV_INSN (insn);

  do
    {
      sd_iterator_def sd_it;
      dep_t dep;

      for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
	   sd_iterator_cond (&sd_it, &dep);)
	{
	  rtx_insn *consumer = DEP_CON (dep);

	  if (BLOCK_FOR_INSN (consumer) != rec)
	    {
	      sd_delete_dep (sd_it);

	      if (bitmap_set_bit (in_ready, INSN_LUID (consumer)))
		ready_list.safe_push (consumer);
	    }
	  else
	    {
	      gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);

	      sd_iterator_next (&sd_it);
	    }
	}

      insn = PREV_INSN (insn);
    }
  while (insn != note);

  /* Try to add instructions to the ready or queue list.  */
  unsigned int i;
  rtx_insn *temp;
  FOR_EACH_VEC_ELT_REVERSE (ready_list, i, temp)
    try_ready (temp);

  /* Fixing jump's dependences.  */
  insn = BB_HEAD (rec);
  jump = BB_END (rec);

  gcc_assert (LABEL_P (insn));
  insn = NEXT_INSN (insn);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
  add_jump_dependencies (insn, jump);
}

/* Change pattern of INSN to NEW_PAT.  Invalidate cached haifa
   instruction data.  */
static bool
haifa_change_pattern (rtx_insn *insn, rtx new_pat)
{
  int t;

  t = validate_change (insn, &PATTERN (insn), new_pat, 0);
  if (!t)
    return false;

  update_insn_after_change (insn);
  return true;
}

/* -1 - can't speculate,
   0 - for speculation with REQUEST mode it is OK to use
   current instruction pattern,
   1 - need to change pattern for *NEW_PAT to be speculative.  */
int
sched_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
{
  gcc_assert (current_sched_info->flags & DO_SPECULATION
              && (request & SPECULATIVE)
	      && sched_insn_is_legitimate_for_speculation_p (insn, request));

  if ((request & spec_info->mask) != request)
    return -1;

  if (request & BE_IN_SPEC
      && !(request & BEGIN_SPEC))
    return 0;

  return targetm.sched.speculate_insn (insn, request, new_pat);
}
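
/* An illustrative caller pattern (hypothetical, following the return
   value convention documented above):

     rtx new_pat;
     switch (sched_speculate_insn (insn, request, &new_pat))
       {
       case -1: the dependence must stay hard;
       case 0: the current PATTERN (insn) can be used as-is;
       case 1: install *new_pat, e.g. via haifa_change_pattern.
       }  */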

static int
haifa_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
{
  gcc_assert (sched_deps_info->generate_spec_deps
	      && !IS_SPECULATION_CHECK_P (insn));

  if (HAS_INTERNAL_DEP (insn)
      || SCHED_GROUP_P (insn))
    return -1;

  return sched_speculate_insn (insn, request, new_pat);
}

/* Print some information about block BB, which starts with HEAD and
   ends with TAIL, before scheduling it.
   I is zero if the scheduler is about to start with a fresh ebb.  */
static void
dump_new_block_header (int i, basic_block bb, rtx_insn *head, rtx_insn *tail)
{
  if (!i)
    fprintf (sched_dump,
	     ";;   ======================================================\n");
  else
    fprintf (sched_dump,
	     ";;   =====================ADVANCING TO=====================\n");
  fprintf (sched_dump,
	   ";;   -- basic block %d from %d to %d -- %s reload\n",
	   bb->index, INSN_UID (head), INSN_UID (tail),
	   (reload_completed ? "after" : "before"));
  fprintf (sched_dump,
	   ";;   ======================================================\n");
  fprintf (sched_dump, "\n");
}

/* Unlink basic block notes and labels and save them, so they
   can be easily restored.  We unlink basic block notes in EBB to
   provide back-compatibility with the previous code, as target backends
   assume that there'll be only instructions between
   current_sched_info->{head and tail}.  We restore these notes as soon
   as we can.
   FIRST (LAST) is the first (last) basic block in the ebb.
   NB: In the usual case (FIRST == LAST) nothing is really done.  */
void
unlink_bb_notes (basic_block first, basic_block last)
{
  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  if (first == last)
    return;

  bb_header = XNEWVEC (rtx_insn *, last_basic_block_for_fn (cfun));

  /* Make a sentinel.  */
  if (last->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
    bb_header[last->next_bb->index] = 0;
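
  /* restore_bb_notes below stops at the first null entry in bb_header,
     so the sentinel above bounds the headers saved by the loop below.  */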

  first = first->next_bb;
  do
    {
      rtx_insn *prev, *label, *note, *next;

      label = BB_HEAD (last);
      if (LABEL_P (label))
	note = NEXT_INSN (label);
      else
	note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      prev = PREV_INSN (label);
      next = NEXT_INSN (note);
      gcc_assert (prev && next);

      SET_NEXT_INSN (prev) = next;
      SET_PREV_INSN (next) = prev;

      bb_header[last->index] = label;

      if (last == first)
	break;

      last = last->prev_bb;
    }
  while (1);
}

/* Restore basic block notes.
   FIRST is the first basic block in the ebb.  */
static void
restore_bb_notes (basic_block first)
{
  if (!bb_header)
    return;

  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  first = first->next_bb;
  /* Remember: FIRST is actually the second basic block in the ebb.  */

  while (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
	 && bb_header[first->index])
    {
      rtx_insn *prev, *label, *note, *next;

      label = bb_header[first->index];
      prev = PREV_INSN (label);
      next = NEXT_INSN (prev);

      if (LABEL_P (label))
	note = NEXT_INSN (label);
      else
	note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      bb_header[first->index] = 0;

      SET_NEXT_INSN (prev) = label;
      SET_NEXT_INSN (note) = next;
      SET_PREV_INSN (next) = note;

      first = first->next_bb;
    }

  free (bb_header);
  bb_header = 0;
}

/* Helper function.
   Fix CFG after both in- and inter-block movement of
   control_flow_insn_p JUMP.  */
static void
fix_jump_move (rtx_insn *jump)
{
  basic_block bb, jump_bb, jump_bb_next;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
	      || IS_SPECULATION_BRANCHY_CHECK_P (jump));

  if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
    /* If jump_bb_next is not empty.  */
    BB_END (jump_bb) = BB_END (jump_bb_next);

  if (BB_END (bb) != PREV_INSN (jump))
    /* Then there are instructions after jump that should be moved
       to jump_bb_next.  */
    BB_END (jump_bb_next) = BB_END (bb);
  else
    /* Otherwise jump_bb_next is empty.  */
    BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));

  /* To make assertion in move_insn happy.  */
  BB_END (bb) = PREV_INSN (jump);

  update_bb_for_insn (jump_bb_next);
}

/* Fix CFG after interblock movement of control_flow_insn_p JUMP.  */
static void
move_block_after_check (rtx_insn *jump)
{
  basic_block bb, jump_bb, jump_bb_next;
  vec<edge, va_gc> *t;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  update_bb_for_insn (jump_bb);

  gcc_assert (IS_SPECULATION_CHECK_P (jump)
	      || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));

  unlink_block (jump_bb_next);
  link_block (jump_bb_next, bb);

  t = bb->succs;
  bb->succs = 0;
  move_succs (&(jump_bb->succs), bb);
  move_succs (&(jump_bb_next->succs), jump_bb);
  move_succs (&t, jump_bb_next);

  df_mark_solutions_dirty ();

  common_sched_info->fix_recovery_cfg
    (bb->index, jump_bb->index, jump_bb_next->index);
}

/* Helper function for move_block_after_check.
   This function attaches the edge vector pointed to by SUCCSP to
   block TO.  */
static void
move_succs (vec<edge, va_gc> **succsp, basic_block to)
{
  edge e;
  edge_iterator ei;

  gcc_assert (to->succs == 0);

  to->succs = *succsp;

  FOR_EACH_EDGE (e, ei, to->succs)
    e->src = to;

  *succsp = 0;
}

/* Remove INSN from the instruction stream.
   INSN shouldn't have any dependencies.  */
static void
sched_remove_insn (rtx_insn *insn)
{
  sd_finish_insn (insn);

  change_queue_index (insn, QUEUE_NOWHERE);
  current_sched_info->add_remove_insn (insn, 1);
  delete_insn (insn);
}

/* Clear priorities of all instructions that are forward dependent on INSN.
   Store in the vector pointed to by ROOTS_PTR the insns on which priority ()
   should be invoked to initialize all cleared priorities.  */
static void
clear_priorities (rtx_insn *insn, rtx_vec_t *roots_ptr)
{
  sd_iterator_def sd_it;
  dep_t dep;
  bool insn_is_root_p = true;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);

      if (INSN_PRIORITY_STATUS (pro) >= 0
	  && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
	{
	  /* If DEP doesn't contribute to priority then INSN itself should
	     be added to priority roots.  */
	  if (contributes_to_priority_p (dep))
	    insn_is_root_p = false;

	  INSN_PRIORITY_STATUS (pro) = -1;
	  clear_priorities (pro, roots_ptr);
	}
    }

  if (insn_is_root_p)
    roots_ptr->safe_push (insn);
}

/* Recompute priorities of instructions whose priorities might have been
   changed.  ROOTS is a vector of instructions whose priority computation will
   trigger initialization of all cleared priorities.  */
static void
calc_priorities (rtx_vec_t roots)
{
  int i;
  rtx_insn *insn;

  FOR_EACH_VEC_ELT (roots, i, insn)
    priority (insn);
}
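
/* clear_priorities and calc_priorities are used as a pair: the roots
   collected by clear_priorities are exactly the insns whose priority ()
   call here re-initializes all the priorities cleared along the way
   (see add_to_speculative_block and create_check_block_twin above).  */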


/* Add dependences between JUMP and other instructions in the recovery
   block.  INSN is the first insn in the recovery block.  */
static void
add_jump_dependencies (rtx_insn *insn, rtx_insn *jump)
{
  do
    {
      insn = NEXT_INSN (insn);
      if (insn == jump)
	break;

      if (dep_list_size (insn, SD_LIST_FORW) == 0)
	{
	  dep_def _new_dep, *new_dep = &_new_dep;

	  init_dep (new_dep, insn, jump, REG_DEP_ANTI);
	  sd_add_dep (new_dep, false);
	}
    }
  while (1);

  gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
}
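
/* As a result the jump anti-depends on every insn in the block that had
   no other forward dependence, which keeps the jump scheduled after all
   of them.  */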

/* Extend data structures for logical insn UID.  */
void
sched_extend_luids (void)
{
  int new_luids_max_uid = get_max_uid () + 1;

  sched_luids.safe_grow_cleared (new_luids_max_uid);
}

/* Initialize LUID for INSN.  */
void
sched_init_insn_luid (rtx_insn *insn)
{
  int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
  int luid;

  if (i >= 0)
    {
      luid = sched_max_luid;
      sched_max_luid += i;
    }
  else
    luid = -1;

  SET_INSN_LUID (insn, luid);
}

/* Initialize luids for BBS.
   The hook common_sched_info->luid_for_non_insn () is used to determine
   if notes, labels, etc. need luids.  */
void
sched_init_luids (bb_vec_t bbs)
{
  int i;
  basic_block bb;

  sched_extend_luids ();
  FOR_EACH_VEC_ELT (bbs, i, bb)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
	sched_init_insn_luid (insn);
    }
}

/* Free LUIDs.  */
void
sched_finish_luids (void)
{
  sched_luids.release ();
  sched_max_luid = 1;
}

/* Return logical uid of INSN.  Helpful while debugging.  */
int
insn_luid (rtx_insn *insn)
{
  return INSN_LUID (insn);
}

/* Extend per insn data in the target.  */
void
sched_extend_target (void)
{
  if (targetm.sched.h_i_d_extended)
    targetm.sched.h_i_d_extended ();
}

/* Extend global scheduler structures (those that live across calls to
   schedule_block) to include information about just emitted INSN.  */
static void
extend_h_i_d (void)
{
  int reserve = (get_max_uid () + 1 - h_i_d.length ());
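
  /* If there is not enough headroom, grow to 3/2 of the current maximal
     UID so that repeated extensions are amortized.  */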
  if (reserve > 0
      && ! h_i_d.space (reserve))
    {
      h_i_d.safe_grow_cleared (3 * get_max_uid () / 2);
      sched_extend_target ();
    }
}

/* Initialize the h_i_d entry of INSN with default values.
   Values that are not explicitly initialized here hold zero.  */
static void
init_h_i_d (rtx_insn *insn)
{
  if (INSN_LUID (insn) > 0)
    {
      INSN_COST (insn) = -1;
      QUEUE_INDEX (insn) = QUEUE_NOWHERE;
      INSN_TICK (insn) = INVALID_TICK;
      INSN_EXACT_TICK (insn) = INVALID_TICK;
      INTER_TICK (insn) = INVALID_TICK;
      TODO_SPEC (insn) = HARD_DEP;
      INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status
	= AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
      INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status
	= AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
    }
}

/* Initialize haifa_insn_data for BBS.  */
void
haifa_init_h_i_d (bb_vec_t bbs)
{
  int i;
  basic_block bb;

  extend_h_i_d ();
  FOR_EACH_VEC_ELT (bbs, i, bb)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
	init_h_i_d (insn);
    }
}

/* Finalize haifa_insn_data.  */
void
haifa_finish_h_i_d (void)
{
  int i;
  haifa_insn_data_t data;
  reg_use_data *use, *next_use;
  reg_set_data *set, *next_set;

  FOR_EACH_VEC_ELT (h_i_d, i, data)
    {
      free (data->max_reg_pressure);
      free (data->reg_pressure);
      for (use = data->reg_use_list; use != NULL; use = next_use)
	{
	  next_use = use->next_insn_use;
	  free (use);
	}
      for (set = data->reg_set_list; set != NULL; set = next_set)
	{
	  next_set = set->next_insn_set;
	  free (set);
	}
    }
  h_i_d.release ();
}

/* Init data for the new insn INSN.  */
static void
haifa_init_insn (rtx_insn *insn)
{
  gcc_assert (insn != NULL);

  sched_extend_luids ();
  sched_init_insn_luid (insn);
  sched_extend_target ();
  sched_deps_init (false);
  extend_h_i_d ();
  init_h_i_d (insn);

  if (adding_bb_to_current_region_p)
    {
      sd_init_insn (insn);

      /* Extend dependency caches by one element.  */
      extend_dependency_caches (1, false);
    }
  if (sched_pressure != SCHED_PRESSURE_NONE)
    init_insn_reg_pressure_info (insn);
}

/* Init data for the new basic block BB which comes after AFTER.  */
static void
haifa_init_only_bb (basic_block bb, basic_block after)
{
  gcc_assert (bb != NULL);

  sched_init_bbs ();

  if (common_sched_info->add_block)
    /* This changes only data structures of the front-end.  */
    common_sched_info->add_block (bb, after);
}

/* A generic version of sched_split_block ().  */
basic_block
sched_split_block_1 (basic_block first_bb, rtx after)
{
  edge e;

  e = split_block (first_bb, after);
  gcc_assert (e->src == first_bb);

  /* sched_split_block emits note if *check == BB_END.  Probably it
     is better to rip that note off.  */

  return e->dest;
}

/* A generic version of sched_create_empty_bb ().  */
basic_block
sched_create_empty_bb_1 (basic_block after)
{
  return create_empty_bb (after);
}

/* Insert PAT as an INSN into the schedule and update the necessary data
   structures to account for it.  */
rtx_insn *
sched_emit_insn (rtx pat)
{
  rtx_insn *insn = emit_insn_before (pat, first_nonscheduled_insn ());
  haifa_init_insn (insn);

  if (current_sched_info->add_remove_insn)
    current_sched_info->add_remove_insn (insn, 0);

  (*current_sched_info->begin_schedule_ready) (insn);
  scheduled_insns.safe_push (insn);

  last_scheduled_insn = insn;
  return insn;
}

/* This function returns a candidate satisfying dispatch constraints from
   the ready list.  */

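/* The order of preference implemented below: the head of the list when
   it is the only ready insn, is not subject to dispatch checking, or
   fits the dispatch window; otherwise the first insn in the list that
   fits the window; after a reported dispatch violation, the head of the
   list; otherwise the first compare insn; and finally the head of the
   list as a fallback.  */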
static rtx_insn *
ready_remove_first_dispatch (struct ready_list *ready)
{
  int i;
  rtx_insn *insn = ready_element (ready, 0);

  if (ready->n_ready == 1
      || !INSN_P (insn)
      || INSN_CODE (insn) < 0
      || !active_insn_p (insn)
      || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
    return ready_remove_first (ready);

  for (i = 1; i < ready->n_ready; i++)
    {
      insn = ready_element (ready, i);

      if (!INSN_P (insn)
	  || INSN_CODE (insn) < 0
	  || !active_insn_p (insn))
	continue;

      if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
	{
	  /* Return the i-th element of ready.  */
	  insn = ready_remove (ready, i);
	  return insn;
	}
    }

  if (targetm.sched.dispatch (NULL, DISPATCH_VIOLATION))
    return ready_remove_first (ready);

  for (i = 1; i < ready->n_ready; i++)
    {
      insn = ready_element (ready, i);

      if (!INSN_P (insn)
	  || INSN_CODE (insn) < 0
	  || !active_insn_p (insn))
	continue;

      /* Return the i-th element of ready.  */
      if (targetm.sched.dispatch (insn, IS_CMP))
	return ready_remove (ready, i);
    }

  return ready_remove_first (ready);
}

/* Get the number of ready insns in the ready list.  */

int
number_in_ready (void)
{
  return ready.n_ready;
}

/* Return the I-th element of the ready list.  */

rtx_insn *
get_ready_element (int i)
{
  return ready_element (&ready, i);
}

#endif /* INSN_SCHEDULING */