1 /* Instruction scheduling pass.
2    Copyright (C) 1992-2020 Free Software Foundation, Inc.
3    Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
4    and currently maintained by, Jim Wilson (wilson@cygnus.com)
5 
6 This file is part of GCC.
7 
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12 
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16 for more details.
17 
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3.  If not see
20 <http://www.gnu.org/licenses/>.  */
21 
22 /* Instruction scheduling pass.  This file, along with sched-deps.c,
23    contains the generic parts.  The actual entry point for
24    the normal instruction scheduling pass is found in sched-rgn.c.
25 
26    We compute insn priorities based on data dependencies.  Flow
27    analysis only creates a fraction of the data dependencies we must
28    observe: namely, only those dependencies which the combiner can be
29    expected to use.  For this pass, we must therefore create the
30    remaining dependencies we need to observe: register dependencies,
31    memory dependencies, dependencies to keep function calls in order,
32    and the dependence between a conditional branch and the setting of
33    condition codes.  All of these are dealt with here.
34 
35    The scheduler first traverses the data flow graph, starting with
36    the last instruction, and proceeding to the first, assigning values
37    to insn_priority as it goes.  This sorts the instructions
38    topologically by data dependence.
39 
40    Once priorities have been established, we order the insns using
41    list scheduling.  This works as follows: starting with a list of
42    all the ready insns, and sorted according to priority number, we
43    schedule the insn from the end of the list by placing its
44    predecessors in the list according to their priority order.  We
45    consider this insn scheduled by setting the pointer to the "end" of
46    the list to point to the previous insn.  When an insn has no
47    predecessors, we either queue it until sufficient time has elapsed
48    or add it to the ready list.  As the instructions are scheduled or
49    when stalls are introduced, the queue advances and dumps insns into
50    the ready list.  When all insns down to the lowest priority have
51    been scheduled, the critical path of the basic block has been made
52    as short as possible.  The remaining insns are then scheduled in
53    remaining slots.
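
/* Illustrative sketch, not part of this pass: a self-contained toy list
   scheduler over a hard-coded four-insn dependence DAG, showing the
   priority computation (longest latency-weighted path to the end of the
   block) and the priority-driven selection described above.  All names
   and numbers below are hypothetical.

     #include <stdio.h>

     #define N 4
     static const int latency[N] = { 2, 1, 1, 1 };
     // dep[i][j] != 0 means insn j depends on insn i.
     static const int dep[N][N]
       = { {0,1,1,0}, {0,0,0,1}, {0,0,0,1}, {0,0,0,0} };
     static int prio[N], npreds[N], ready_at[N], scheduled[N];

     // Longest latency-weighted path from insn I to the end of the block.
     static int compute_prio (int i)
     {
       int best = 0;
       for (int j = 0; j < N; j++)
         if (dep[i][j])
           {
             int p = compute_prio (j);
             if (p > best)
               best = p;
           }
       return prio[i] = best + latency[i];
     }

     int main (void)
     {
       for (int i = 0; i < N; i++)
         {
           compute_prio (i);
           for (int j = 0; j < N; j++)
             npreds[i] += dep[j][i];    // count unscheduled predecessors
         }
       int clock = 0, done = 0;
       while (done < N)
         {
           int best = -1;
           for (int i = 0; i < N; i++)  // highest-priority ready insn
             if (!scheduled[i] && npreds[i] == 0 && ready_at[i] <= clock
                 && (best < 0 || prio[i] > prio[best]))
               best = i;
           if (best < 0)
             {
               clock++;                 // nothing ready: introduce a stall
               continue;
             }
           printf ("cycle %d: insn %d (priority %d)\n",
                   clock, best, prio[best]);
           scheduled[best] = 1;
           done++;
           for (int j = 0; j < N; j++)  // release dependents; they become
             if (dep[best][j])          // ready once the latency elapses
               {
                 npreds[j]--;
                 if (ready_at[j] < clock + latency[best])
                   ready_at[j] = clock + latency[best];
               }
           clock++;                     // issue rate of one insn per cycle
         }
       return 0;
     }  */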
54 
55    The following list shows the order in which we want to break ties
56    among insns in the ready list (a comparator sketch follows the list):
57 
58    1.  choose insn with the longest path to end of bb, ties
59    broken by
60    2.  choose insn with least contribution to register pressure,
61    ties broken by
62    3.  prefer in-block over interblock motion, ties broken by
63    4.  prefer useful over speculative motion, ties broken by
64    5.  choose insn with largest control flow probability, ties
65    broken by
66    6.  choose insn with the least dependences upon the previously
67    scheduled insn, ties broken by
68    7.  choose the insn which has the most insns dependent on it, or finally
69    8.  choose insn with lowest UID.
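
/* Illustrative sketch, not part of this pass: a qsort-style comparator
   shaped like the eight tie-breaking rules above; a negative result means
   insn A is preferred.  The struct and its field names are hypothetical;
   the real comparison is rank_for_schedule, declared below.

     struct toy_insn
     {
       int path_to_bb_end, pressure_delta, interblock_p, speculative_p;
       int cfg_probability, deps_on_last_scheduled, num_dependents, uid;
     };

     static int
     rank_sketch (const struct toy_insn *a, const struct toy_insn *b)
     {
       if (a->path_to_bb_end != b->path_to_bb_end)          // rule 1
         return b->path_to_bb_end - a->path_to_bb_end;
       if (a->pressure_delta != b->pressure_delta)          // rule 2
         return a->pressure_delta - b->pressure_delta;
       if (a->interblock_p != b->interblock_p)              // rule 3
         return a->interblock_p - b->interblock_p;
       if (a->speculative_p != b->speculative_p)            // rule 4
         return a->speculative_p - b->speculative_p;
       if (a->cfg_probability != b->cfg_probability)        // rule 5
         return b->cfg_probability - a->cfg_probability;
       if (a->deps_on_last_scheduled != b->deps_on_last_scheduled)
         return a->deps_on_last_scheduled                   // rule 6
                - b->deps_on_last_scheduled;
       if (a->num_dependents != b->num_dependents)          // rule 7
         return b->num_dependents - a->num_dependents;
       return a->uid - b->uid;                              // rule 8
     }  */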
70 
71    Memory references complicate matters.  Only if we can be certain
72    that memory references are not part of the data dependency graph
73    (via true, anti, or output dependence), can we move operations past
74    memory references.  To first approximation, reads can be done
75    independently, while writes introduce dependencies.  Better
76    approximations will yield fewer dependencies.
77 
78    Before reload, an extended analysis of interblock data dependences
79    is required for interblock scheduling.  This is performed in
80    compute_block_dependences ().
81 
82    Dependencies set up by memory references are treated in exactly the
83    same way as other dependencies, by using insn backward dependences
84    INSN_BACK_DEPS.  INSN_BACK_DEPS are translated into forward dependences
85    INSN_FORW_DEPS for the purpose of forward list scheduling.
86 
87    Having optimized the critical path, we may have also unduly
88    extended the lifetimes of some registers.  If an operation requires
89    that constants be loaded into registers, it is certainly desirable
90    to load those constants as early as necessary, but no earlier.
91    I.e., it will not do to load up a bunch of registers at the
92    beginning of a basic block only to use them at the end, if they
93    could be loaded later, since this may result in excessive register
94    utilization.
95 
96    Note that since branches are never in basic blocks, but only end
97    basic blocks, this pass will not move branches.  But that is ok,
98    since we can use GNU's delayed branch scheduling pass to take care
99    of this case.
100 
101    Also note that no further optimizations based on algebraic
102    identities are performed, so this pass would be a good one to
103    perform instruction splitting, such as breaking up a multiply
104    instruction into shifts and adds where that is profitable.
105 
106    Given the memory aliasing analysis that this pass should perform,
107    it should be possible to remove redundant stores to memory, and to
108    load values from registers instead of hitting memory.
109 
110    Before reload, speculative insns are moved only if a 'proof' exists
111    that no exception will be caused by this, and if no live registers
112    exist that inhibit the motion (live register constraints are not
113    represented by data dependence edges).
114 
115    This pass must update information that subsequent passes expect to
116    be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
117    reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.
118 
119    The information in the line number notes is carefully retained by
120    this pass.  Notes that refer to the starting and ending of
121    exception regions are also carefully retained by this pass.  All
122    other NOTE insns are grouped in their same relative order at the
123    beginning of basic blocks and regions that have been scheduled.  */
124 
125 #include "config.h"
126 #include "system.h"
127 #include "coretypes.h"
128 #include "backend.h"
129 #include "target.h"
130 #include "rtl.h"
131 #include "cfghooks.h"
132 #include "df.h"
133 #include "memmodel.h"
134 #include "tm_p.h"
135 #include "insn-config.h"
136 #include "regs.h"
137 #include "ira.h"
138 #include "recog.h"
139 #include "insn-attr.h"
140 #include "cfgrtl.h"
141 #include "cfgbuild.h"
142 #include "sched-int.h"
143 #include "common/common-target.h"
144 #include "dbgcnt.h"
145 #include "cfgloop.h"
146 #include "dumpfile.h"
147 #include "print-rtl.h"
148 #include "function-abi.h"
149 
150 #ifdef INSN_SCHEDULING
151 
152 /* True if we do register pressure relief through live-range
153    shrinkage.  */
154 static bool live_range_shrinkage_p;
155 
156 /* Switch on live range shrinkage.  */
157 void
158 initialize_live_range_shrinkage (void)
159 {
160   live_range_shrinkage_p = true;
161 }
162 
163 /* Switch off live range shrinkage.  */
164 void
165 finish_live_range_shrinkage (void)
166 {
167   live_range_shrinkage_p = false;
168 }
169 
170 /* issue_rate is the number of insns that can be scheduled in the same
171    machine cycle.  It can be defined in the config/mach/mach.h file,
172    otherwise we set it to 1.  */
173 
174 int issue_rate;
175 
176 /* This can be set to true by a backend if the scheduler should not
177    enable a DCE pass.  */
178 bool sched_no_dce;
179 
180 /* The current initiation interval used when modulo scheduling.  */
181 static int modulo_ii;
182 
183 /* The maximum number of stages we are prepared to handle.  */
184 static int modulo_max_stages;
185 
186 /* The number of insns that exist in each iteration of the loop.  We use this
187    to detect when we've scheduled all insns from the first iteration.  */
188 static int modulo_n_insns;
189 
190 /* The current count of insns in the first iteration of the loop that have
191    already been scheduled.  */
192 static int modulo_insns_scheduled;
193 
194 /* The maximum uid of insns from the first iteration of the loop.  */
195 static int modulo_iter0_max_uid;
196 
197 /* The number of times we should attempt to backtrack when modulo scheduling.
198    Decreased each time we have to backtrack.  */
199 static int modulo_backtracks_left;
200 
201 /* The stage in which the last insn from the original loop was
202    scheduled.  */
203 static int modulo_last_stage;
204 
205 /* sched-verbose controls the amount of debugging output the
206    scheduler prints.  It is controlled by -fsched-verbose=N:
207    N=0: no debugging output.
208    N=1: default value.
209    N=2: bb's probabilities, detailed ready list info, unit/insn info.
210    N=3: rtl at abort point, control-flow, regions info.
211    N=5: dependences info.  */
212 int sched_verbose = 0;
213 
214 /* Debugging file.  All printouts are sent to dump. */
215 FILE *sched_dump = 0;
216 
217 /* This is a placeholder for the scheduler parameters common
218    to all schedulers.  */
219 struct common_sched_info_def *common_sched_info;
220 
221 #define INSN_TICK(INSN)	(HID (INSN)->tick)
222 #define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
223 #define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
224 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
225 #define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
226 #define SHADOW_P(INSN) (HID (INSN)->shadow_p)
227 #define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
228 /* Cached cost of the instruction.  Use insn_sched_cost to get cost of the
229    insn.  -1 here means that the field is not initialized.  */
230 #define INSN_COST(INSN)	(HID (INSN)->cost)
231 
232 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
233    then it should be recalculated from scratch.  */
234 #define INVALID_TICK (-(max_insn_queue_index + 1))
235 /* The minimal value of the INSN_TICK of an instruction.  */
236 #define MIN_TICK (-max_insn_queue_index)
237 
238 /* Original order of insns in the ready list.
239    Used to keep order of normal insns while separating DEBUG_INSNs.  */
240 #define INSN_RFS_DEBUG_ORIG_ORDER(INSN) (HID (INSN)->rfs_debug_orig_order)
241 
242 /* The deciding reason for INSN's place in the ready list.  */
243 #define INSN_LAST_RFS_WIN(INSN) (HID (INSN)->last_rfs_win)
244 
245 /* List of important notes we must keep around.  This is a pointer to the
246    last element in the list.  */
247 rtx_insn *note_list;
248 
249 static struct spec_info_def spec_info_var;
250 /* Description of the speculative part of the scheduling.
251    If NULL - no speculation.  */
252 spec_info_t spec_info = NULL;
253 
254 /* True if a recovery block was added during scheduling of the current
255    block.  Used to determine whether we need to fix INSN_TICKs.  */
256 static bool haifa_recovery_bb_recently_added_p;
257 
258 /* True if a recovery block was added during this scheduling pass.
259    Used to determine whether we should have empty memory pools of
260    dependencies after finishing the current region.  */
261 bool haifa_recovery_bb_ever_added_p;
262 
263 /* Counters of different types of speculative instructions.  */
264 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
265 
266 /* Array used in {unlink, restore}_bb_notes.  */
267 static rtx_insn **bb_header = 0;
268 
269 /* Basic block after which recovery blocks will be created.  */
270 static basic_block before_recovery;
271 
272 /* Basic block just before the EXIT_BLOCK and after recovery, if we have
273    created it.  */
274 basic_block after_recovery;
275 
276 /* FALSE if we add bb to another region, so we don't need to initialize it.  */
277 bool adding_bb_to_current_region_p = true;
278 
279 /* Queues, etc.  */
280 
281 /* An instruction is ready to be scheduled when all insns preceding it
282    have already been scheduled.  It is important to ensure that all
283    insns which use its result will not be executed until its result
284    has been computed.  An insn is maintained in one of four structures:
285 
286    (P) the "Pending" set of insns which cannot be scheduled until
287    their dependencies have been satisfied.
288    (Q) the "Queued" set of insns that can be scheduled when sufficient
289    time has passed.
290    (R) the "Ready" list of unscheduled, uncommitted insns.
291    (S) the "Scheduled" list of insns.
292 
293    Initially, all insns are either "Pending" or "Ready" depending on
294    whether their dependencies are satisfied.
295 
296    Insns move from the "Ready" list to the "Scheduled" list as they
297    are committed to the schedule.  As this occurs, the insns in the
298    "Pending" list have their dependencies satisfied and move to either
299    the "Ready" list or the "Queued" set depending on whether
300    sufficient time has passed to make them ready.  As time passes,
301    insns move from the "Queued" set to the "Ready" list.
302 
303    The "Pending" list (P) consists of the insns in the INSN_FORW_DEPS of
304    the unscheduled insns, i.e., those that are ready, queued, and pending.
305    The "Queued" set (Q) is implemented by the variable `insn_queue'.
306    The "Ready" list (R) is implemented by the variables `ready' and
307    `n_ready'.
308    The "Scheduled" list (S) is the new insn chain built by this pass.
309 
310    The transition (R->S) is implemented in the scheduling loop in
311    `schedule_block' when the best insn to schedule is chosen.
312    The transitions (P->R and P->Q) are implemented in `schedule_insn' as
313    insns move from the ready list to the scheduled list.
314    The transition (Q->R) is implemented in 'queue_to_ready' as time
315    passes or stalls are introduced.  */
316 
317 /* Implement a circular buffer to delay instructions until sufficient
318    time has passed.  For the new pipeline description interface,
319    MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
320    than the maximal time of instruction execution computed by genattr.c,
321    based on the maximal time of functional unit reservations and of
322    getting a result.  This is the longest time an insn may be queued.  */
323 
324 static rtx_insn_list **insn_queue;
325 static int q_ptr = 0;
326 static int q_size = 0;
327 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
328 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
329 
330 #define QUEUE_SCHEDULED (-3)
331 #define QUEUE_NOWHERE   (-2)
332 #define QUEUE_READY     (-1)
333 /* QUEUE_SCHEDULED - INSN is scheduled.
334    QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in
335    queue or ready list.
336    QUEUE_READY     - INSN is in ready list.
337    N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */
338 
339 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
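
/* Worked example with hypothetical numbers: if max_insn_queue_index is 31
   (a power of two minus one), the queue has 32 slots and the NEXT_Q /
   NEXT_Q_AFTER macros wrap by masking.  An insn that must stall for 3
   cycles while q_ptr == 30 is placed in slot
   NEXT_Q_AFTER (30, 3) == (30 + 3) & 31 == 1, and its QUEUE_INDEX records
   that slot.  QUEUE_READY and QUEUE_SCHEDULED correspond to the "Ready"
   and "Scheduled" sets described earlier, while QUEUE_NOWHERE roughly
   covers the "Pending" set.  */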
340 
341 /* The following variable holds the state describing all current and
342    future reservations of the processor units.  */
343 state_t curr_state;
344 
345 /* The following variable is the size of the memory representing all
346    current and future reservations of the processor units.  */
347 size_t dfa_state_size;
348 
349 /* The following array is used to find the best insn from ready when
350    the automaton pipeline interface is used.  */
351 signed char *ready_try = NULL;
352 
353 /* The ready list.  */
354 struct ready_list ready = {NULL, 0, 0, 0, 0};
355 
356 /* The pointer to the ready list (to be removed).  */
357 static struct ready_list *readyp = &ready;
358 
359 /* Scheduling clock.  */
360 static int clock_var;
361 
362 /* Clock at which the previous instruction was issued.  */
363 static int last_clock_var;
364 
365 /* Set to true if, when queuing a shadow insn, we discover that it would be
366    scheduled too late.  */
367 static bool must_backtrack;
368 
369 /* The following variable is the number of essential insns issued on
370    the current cycle.  An insn is essential if it changes the
371    processor's state.  */
372 int cycle_issued_insns;
373 
374 /* This records the actual schedule.  It is built up during the main phase
375    of schedule_block, and afterwards used to reorder the insns in the RTL.  */
376 static vec<rtx_insn *> scheduled_insns;
377 
378 static int may_trap_exp (const_rtx, int);
379 
380 /* Nonzero iff the address is composed of at most 1 register.  */
381 #define CONST_BASED_ADDRESS_P(x)			\
382   (REG_P (x)					\
383    || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
384 	|| (GET_CODE (x) == LO_SUM))			\
385        && (CONSTANT_P (XEXP (x, 0))			\
386 	   || CONSTANT_P (XEXP (x, 1)))))
387 
388 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
389    as found by analyzing insn's expression.  */
390 
391 
392 static int haifa_luid_for_non_insn (rtx x);
393 
394 /* Haifa version of sched_info hooks common to all headers.  */
395 const struct common_sched_info_def haifa_common_sched_info =
396   {
397     NULL, /* fix_recovery_cfg */
398     NULL, /* add_block */
399     NULL, /* estimate_number_of_insns */
400     haifa_luid_for_non_insn, /* luid_for_non_insn */
401     SCHED_PASS_UNKNOWN /* sched_pass_id */
402   };
403 
404 /* Mapping from instruction UID to its Logical UID.  */
405 vec<int> sched_luids;
406 
407 /* Next LUID to assign to an instruction.  */
408 int sched_max_luid = 1;
409 
410 /* Haifa Instruction Data.  */
411 vec<haifa_insn_data_def> h_i_d;
412 
413 void (* sched_init_only_bb) (basic_block, basic_block);
414 
415 /* Split block function.  Different schedulers might use different functions
416    to keep their internal data consistent.  */
417 basic_block (* sched_split_block) (basic_block, rtx);
418 
419 /* Create empty basic block after the specified block.  */
420 basic_block (* sched_create_empty_bb) (basic_block);
421 
422 /* Return the number of cycles until INSN is expected to be ready.
423    Return zero if it already is.  */
424 static int
425 insn_delay (rtx_insn *insn)
426 {
427   return MAX (INSN_TICK (insn) - clock_var, 0);
428 }
429 
430 static int
431 may_trap_exp (const_rtx x, int is_store)
432 {
433   enum rtx_code code;
434 
435   if (x == 0)
436     return TRAP_FREE;
437   code = GET_CODE (x);
438   if (is_store)
439     {
440       if (code == MEM && may_trap_p (x))
441 	return TRAP_RISKY;
442       else
443 	return TRAP_FREE;
444     }
445   if (code == MEM)
446     {
447       /* The insn uses memory:  a volatile load.  */
448       if (MEM_VOLATILE_P (x))
449 	return IRISKY;
450       /* An exception-free load.  */
451       if (!may_trap_p (x))
452 	return IFREE;
453       /* A load with 1 base register, to be further checked.  */
454       if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
455 	return PFREE_CANDIDATE;
456       /* No info on the load, to be further checked.  */
457       return PRISKY_CANDIDATE;
458     }
459   else
460     {
461       const char *fmt;
462       int i, insn_class = TRAP_FREE;
463 
464       /* Neither store nor load, check if it may cause a trap.  */
465       if (may_trap_p (x))
466 	return TRAP_RISKY;
467       /* Recursive step: walk the insn...  */
468       fmt = GET_RTX_FORMAT (code);
469       for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
470 	{
471 	  if (fmt[i] == 'e')
472 	    {
473 	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
474 	      insn_class = WORST_CLASS (insn_class, tmp_class);
475 	    }
476 	  else if (fmt[i] == 'E')
477 	    {
478 	      int j;
479 	      for (j = 0; j < XVECLEN (x, i); j++)
480 		{
481 		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
482 		  insn_class = WORST_CLASS (insn_class, tmp_class);
483 		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
484 		    break;
485 		}
486 	    }
487 	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
488 	    break;
489 	}
490       return insn_class;
491     }
492 }
493 
494 /* Classifies rtx X of an insn for the purpose of verifying that X can be
495    executed speculatively (and consequently the insn can be moved
496    speculatively), by examining X, returning:
497    TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
498    TRAP_FREE: non-load insn.
499    IFREE: load from a globally safe location.
500    IRISKY: volatile load.
501    PFREE_CANDIDATE, PRISKY_CANDIDATE: load that needs to be checked for
502    being either PFREE or PRISKY.  */
503 
504 static int
505 haifa_classify_rtx (const_rtx x)
506 {
507   int tmp_class = TRAP_FREE;
508   int insn_class = TRAP_FREE;
509   enum rtx_code code;
510 
511   if (GET_CODE (x) == PARALLEL)
512     {
513       int i, len = XVECLEN (x, 0);
514 
515       for (i = len - 1; i >= 0; i--)
516 	{
517 	  tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
518 	  insn_class = WORST_CLASS (insn_class, tmp_class);
519 	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
520 	    break;
521 	}
522     }
523   else
524     {
525       code = GET_CODE (x);
526       switch (code)
527 	{
528 	case CLOBBER:
529 	  /* Test if it is a 'store'.  */
530 	  tmp_class = may_trap_exp (XEXP (x, 0), 1);
531 	  break;
532 	case SET:
533 	  /* Test if it is a store.  */
534 	  tmp_class = may_trap_exp (SET_DEST (x), 1);
535 	  if (tmp_class == TRAP_RISKY)
536 	    break;
537 	  /* Test if it is a load.  */
538 	  tmp_class =
539 	    WORST_CLASS (tmp_class,
540 			 may_trap_exp (SET_SRC (x), 0));
541 	  break;
542 	case COND_EXEC:
543 	  tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
544 	  if (tmp_class == TRAP_RISKY)
545 	    break;
546 	  tmp_class = WORST_CLASS (tmp_class,
547 				   may_trap_exp (COND_EXEC_TEST (x), 0));
548 	  break;
549 	case TRAP_IF:
550 	  tmp_class = TRAP_RISKY;
551 	  break;
552 	default:;
553 	}
554       insn_class = tmp_class;
555     }
556 
557   return insn_class;
558 }
559 
560 int
561 haifa_classify_insn (const_rtx insn)
562 {
563   return haifa_classify_rtx (PATTERN (insn));
564 }
565 
566 /* After the scheduler initialization function has been called, this function
567    can be called to enable modulo scheduling.  II is the initiation interval
568    we should use; it affects the delays for delay_pairs that were recorded as
569    separated by a given number of stages.
570 
571    MAX_STAGES provides us with a limit
572    after which we give up scheduling; the caller must have unrolled at least
573    as many copies of the loop body and recorded delay_pairs for them.
574 
575    INSNS is the number of real (non-debug) insns in one iteration of
576    the loop.  MAX_UID can be used to test whether an insn belongs to
577    the first iteration of the loop; all of them have a uid lower than
578    MAX_UID.  */
579 void
580 set_modulo_params (int ii, int max_stages, int insns, int max_uid)
581 {
582   modulo_ii = ii;
583   modulo_max_stages = max_stages;
584   modulo_n_insns = insns;
585   modulo_iter0_max_uid = max_uid;
586   modulo_backtracks_left = param_max_modulo_backtrack_attempts;
587 }
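
/* Illustrative sketch, not part of this pass: how a caller that has
   unrolled a loop body might enable modulo scheduling.  The numbers and
   the variable name are hypothetical.

     // Loop body of 12 real insns, unrolled 4 times, initiation interval
     // of 8 cycles; every insn of the first copy has a UID below
     // first_copy_max_uid.
     set_modulo_params (8, 4, 12, first_copy_max_uid);  */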
588 
589 /* A structure to record a pair of insns where the first one is a real
590    insn that has delay slots, and the second is its delayed shadow.
591    I1 is scheduled normally and will emit an assembly instruction,
592    while I2 describes the side effect that takes place at the
593    transition between cycles CYCLES and (CYCLES + 1) after I1.  */
594 struct delay_pair
595 {
596   struct delay_pair *next_same_i1;
597   rtx_insn *i1, *i2;
598   int cycles;
599   /* When doing modulo scheduling, a delay_pair can also be used to
600      show that I1 and I2 are the same insn in a different stage.  If that
601      is the case, STAGES will be nonzero.  */
602   int stages;
603 };
604 
605 /* Helpers for delay hashing.  */
606 
607 struct delay_i1_hasher : nofree_ptr_hash <delay_pair>
608 {
609   typedef void *compare_type;
610   static inline hashval_t hash (const delay_pair *);
611   static inline bool equal (const delay_pair *, const void *);
612 };
613 
614 /* Returns a hash value for X, based on hashing just I1.  */
615 
616 inline hashval_t
617 delay_i1_hasher::hash (const delay_pair *x)
618 {
619   return htab_hash_pointer (x->i1);
620 }
621 
622 /* Return true if I1 of pair X is the same as that of pair Y.  */
623 
624 inline bool
625 delay_i1_hasher::equal (const delay_pair *x, const void *y)
626 {
627   return x->i1 == y;
628 }
629 
630 struct delay_i2_hasher : free_ptr_hash <delay_pair>
631 {
632   typedef void *compare_type;
633   static inline hashval_t hash (const delay_pair *);
634   static inline bool equal (const delay_pair *, const void *);
635 };
636 
637 /* Returns a hash value for X, based on hashing just I2.  */
638 
639 inline hashval_t
640 delay_i2_hasher::hash (const delay_pair *x)
641 {
642   return htab_hash_pointer (x->i2);
643 }
644 
645 /* Return true if I2 of pair X is the same as that of pair Y.  */
646 
647 inline bool
648 delay_i2_hasher::equal (const delay_pair *x, const void *y)
649 {
650   return x->i2 == y;
651 }
652 
653 /* Two hash tables to record delay_pairs, one indexed by I1 and the other
654    indexed by I2.  */
655 static hash_table<delay_i1_hasher> *delay_htab;
656 static hash_table<delay_i2_hasher> *delay_htab_i2;
657 
658 /* Called through htab_traverse.  Walk the hashtable using I2 as
659    index, and delete all elements involving an UID higher than
660    that pointed to by *DATA.  */
661 int
662 haifa_htab_i2_traverse (delay_pair **slot, int *data)
663 {
664   int maxuid = *data;
665   struct delay_pair *p = *slot;
666   if (INSN_UID (p->i2) >= maxuid || INSN_UID (p->i1) >= maxuid)
667     {
668       delay_htab_i2->clear_slot (slot);
669     }
670   return 1;
671 }
672 
673 /* Called through htab_traverse.  Walk the hashtable using I1 as
674    index, and delete all elements involving an UID higher than
675    that pointed to by *DATA.  */
676 int
677 haifa_htab_i1_traverse (delay_pair **pslot, int *data)
678 {
679   int maxuid = *data;
680   struct delay_pair *p, *first, **pprev;
681 
682   if (INSN_UID ((*pslot)->i1) >= maxuid)
683     {
684       delay_htab->clear_slot (pslot);
685       return 1;
686     }
687   pprev = &first;
688   for (p = *pslot; p; p = p->next_same_i1)
689     {
690       if (INSN_UID (p->i2) < maxuid)
691 	{
692 	  *pprev = p;
693 	  pprev = &p->next_same_i1;
694 	}
695     }
696   *pprev = NULL;
697   if (first == NULL)
698     delay_htab->clear_slot (pslot);
699   else
700     *pslot = first;
701   return 1;
702 }
703 
704 /* Discard all delay pairs which involve an insn with an UID higher
705    than MAX_UID.  */
706 void
707 discard_delay_pairs_above (int max_uid)
708 {
709   delay_htab->traverse <int *, haifa_htab_i1_traverse> (&max_uid);
710   delay_htab_i2->traverse <int *, haifa_htab_i2_traverse> (&max_uid);
711 }
712 
713 /* This function can be called by a port just before it starts the final
714    scheduling pass.  It records the fact that an instruction with delay
715    slots has been split into two insns, I1 and I2.  The first one will be
716    scheduled normally and initiates the operation.  The second one is a
717    shadow which must follow a specific number of cycles after I1; its only
718    purpose is to show the side effect that occurs at that cycle in the RTL.
719    If a JUMP_INSN or a CALL_INSN has been split, I1 should be a normal INSN,
720    while I2 retains the original insn type.
721 
722    There are two ways in which the number of cycles can be specified,
723    involving the CYCLES and STAGES arguments to this function.  If STAGES
724    is zero, we just use the value of CYCLES.  Otherwise, STAGES is a factor
725    which is multiplied by MODULO_II to give the number of cycles.  This is
726    only useful if the caller also calls set_modulo_params to enable modulo
727    scheduling.  */
728 
729 void
730 record_delay_slot_pair (rtx_insn *i1, rtx_insn *i2, int cycles, int stages)
731 {
732   struct delay_pair *p = XNEW (struct delay_pair);
733   struct delay_pair **slot;
734 
735   p->i1 = i1;
736   p->i2 = i2;
737   p->cycles = cycles;
738   p->stages = stages;
739 
740   if (!delay_htab)
741     {
742       delay_htab = new hash_table<delay_i1_hasher> (10);
743       delay_htab_i2 = new hash_table<delay_i2_hasher> (10);
744     }
745   slot = delay_htab->find_slot_with_hash (i1, htab_hash_pointer (i1), INSERT);
746   p->next_same_i1 = *slot;
747   *slot = p;
748   slot = delay_htab_i2->find_slot (p, INSERT);
749   *slot = p;
750 }
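
/* Illustrative sketch, not part of this pass: how a port might record that
   insn I1 initiates an operation whose side effect (the shadow I2) occurs
   two cycles later.  Passing 0 for STAGES means the delay is given
   directly in cycles; the insn variables are hypothetical.

     record_delay_slot_pair (i1, i2, 2, 0);
     // Afterwards real_insn_for_shadow (i2) returns i1, and the dependence
     // of i2 on i1 gets a cost of pair_delay () == 2 cycles in dep_cost_1.  */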
751 
752 /* Examine the delay pair hashtable to see if INSN is a shadow for another,
753    and return the other insn if so.  Return NULL otherwise.  */
754 rtx_insn *
755 real_insn_for_shadow (rtx_insn *insn)
756 {
757   struct delay_pair *pair;
758 
759   if (!delay_htab)
760     return NULL;
761 
762   pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
763   if (!pair || pair->stages > 0)
764     return NULL;
765   return pair->i1;
766 }
767 
768 /* For a pair P of insns, return the fixed distance in cycles from the first
769    insn after which the second must be scheduled.  */
770 static int
771 pair_delay (struct delay_pair *p)
772 {
773   if (p->stages == 0)
774     return p->cycles;
775   else
776     return p->stages * modulo_ii;
777 }
778 
779 /* Given an insn INSN, add a dependence on its delayed shadow if it
780    has one.  Also try to find situations where shadows depend on each other
781    and add dependencies to the real insns to limit the amount of backtracking
782    needed.  */
783 void
784 add_delay_dependencies (rtx_insn *insn)
785 {
786   struct delay_pair *pair;
787   sd_iterator_def sd_it;
788   dep_t dep;
789 
790   if (!delay_htab)
791     return;
792 
793   pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
794   if (!pair)
795     return;
796   add_dependence (insn, pair->i1, REG_DEP_ANTI);
797   if (pair->stages)
798     return;
799 
800   FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep)
801     {
802       rtx_insn *pro = DEP_PRO (dep);
803       struct delay_pair *other_pair
804 	= delay_htab_i2->find_with_hash (pro, htab_hash_pointer (pro));
805       if (!other_pair || other_pair->stages)
806 	continue;
807       if (pair_delay (other_pair) >= pair_delay (pair))
808 	{
809 	  if (sched_verbose >= 4)
810 	    {
811 	      fprintf (sched_dump, ";;\tadding dependence %d <- %d\n",
812 		       INSN_UID (other_pair->i1),
813 		       INSN_UID (pair->i1));
814 	      fprintf (sched_dump, ";;\tpair1 %d <- %d, cost %d\n",
815 		       INSN_UID (pair->i1),
816 		       INSN_UID (pair->i2),
817 		       pair_delay (pair));
818 	      fprintf (sched_dump, ";;\tpair2 %d <- %d, cost %d\n",
819 		       INSN_UID (other_pair->i1),
820 		       INSN_UID (other_pair->i2),
821 		       pair_delay (other_pair));
822 	    }
823 	  add_dependence (pair->i1, other_pair->i1, REG_DEP_ANTI);
824 	}
825     }
826 }
827 
828 /* Forward declarations.  */
829 
830 static int priority (rtx_insn *, bool force_recompute = false);
831 static int autopref_rank_for_schedule (const rtx_insn *, const rtx_insn *);
832 static int rank_for_schedule (const void *, const void *);
833 static void swap_sort (rtx_insn **, int);
834 static void queue_insn (rtx_insn *, int, const char *);
835 static int schedule_insn (rtx_insn *);
836 static void adjust_priority (rtx_insn *);
837 static void advance_one_cycle (void);
838 static void extend_h_i_d (void);
839 
840 
841 /* Notes handling mechanism:
842    =========================
843    Generally, NOTES are saved before scheduling and restored after scheduling.
844    The scheduler distinguishes between two types of notes:
845 
846    (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
847    Before scheduling a region, a pointer to the note is added to the insn
848    that follows or precedes it.  (This happens as part of the data dependence
849    computation).  After scheduling an insn, the pointer contained in it is
850    used for regenerating the corresponding note (in reemit_notes).
851 
852    (2) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
853    these notes are put in a list (in rm_other_notes() and
854    unlink_other_notes ()).  After scheduling the block, these notes are
855    inserted at the beginning of the block (in schedule_block()).  */
856 
857 static void ready_add (struct ready_list *, rtx_insn *, bool);
858 static rtx_insn *ready_remove_first (struct ready_list *);
859 static rtx_insn *ready_remove_first_dispatch (struct ready_list *ready);
860 
861 static void queue_to_ready (struct ready_list *);
862 static int early_queue_to_ready (state_t, struct ready_list *);
863 
864 /* The following functions are used to implement multi-pass scheduling
865    on the first cycle.  */
866 static rtx_insn *ready_remove (struct ready_list *, int);
867 static void ready_remove_insn (rtx_insn *);
868 
869 static void fix_inter_tick (rtx_insn *, rtx_insn *);
870 static int fix_tick_ready (rtx_insn *);
871 static void change_queue_index (rtx_insn *, int);
872 
873 /* The following functions are used to implement scheduling of data/control
874    speculative instructions.  */
875 
876 static void extend_h_i_d (void);
877 static void init_h_i_d (rtx_insn *);
878 static int haifa_speculate_insn (rtx_insn *, ds_t, rtx *);
879 static void generate_recovery_code (rtx_insn *);
880 static void process_insn_forw_deps_be_in_spec (rtx_insn *, rtx_insn *, ds_t);
881 static void begin_speculative_block (rtx_insn *);
882 static void add_to_speculative_block (rtx_insn *);
883 static void init_before_recovery (basic_block *);
884 static void create_check_block_twin (rtx_insn *, bool);
885 static void fix_recovery_deps (basic_block);
886 static bool haifa_change_pattern (rtx_insn *, rtx);
887 static void dump_new_block_header (int, basic_block, rtx_insn *, rtx_insn *);
888 static void restore_bb_notes (basic_block);
889 static void fix_jump_move (rtx_insn *);
890 static void move_block_after_check (rtx_insn *);
891 static void move_succs (vec<edge, va_gc> **, basic_block);
892 static void sched_remove_insn (rtx_insn *);
893 static void clear_priorities (rtx_insn *, rtx_vec_t *);
894 static void calc_priorities (rtx_vec_t);
895 static void add_jump_dependencies (rtx_insn *, rtx_insn *);
896 
897 #endif /* INSN_SCHEDULING */
898 
899 /* Point to state used for the current scheduling pass.  */
900 struct haifa_sched_info *current_sched_info;
901 
902 #ifndef INSN_SCHEDULING
903 void
904 schedule_insns (void)
905 {
906 }
907 #else
908 
909 /* Do register pressure sensitive insn scheduling if the flag is set
910    up.  */
911 enum sched_pressure_algorithm sched_pressure;
912 
913 /* Map regno -> its pressure class.  The map is defined only when
914    SCHED_PRESSURE != SCHED_PRESSURE_NONE.  */
915 enum reg_class *sched_regno_pressure_class;
916 
917 /* The current register pressure.  Only elements corresponding to pressure
918    classes are defined.  */
919 static int curr_reg_pressure[N_REG_CLASSES];
920 
921 /* Saved value of the previous array.  */
922 static int saved_reg_pressure[N_REG_CLASSES];
923 
924 /* Registers living at the given scheduling point.  */
925 static bitmap curr_reg_live;
926 
927 /* Saved value of the previous array.  */
928 static bitmap saved_reg_live;
929 
930 /* Registers mentioned in the current region.  */
931 static bitmap region_ref_regs;
932 
933 /* Temporary bitmap used for SCHED_PRESSURE_MODEL.  */
934 static bitmap tmp_bitmap;
935 
936 /* Effective number of available registers of a given class (see comment
937    in sched_pressure_start_bb).  */
938 static int sched_class_regs_num[N_REG_CLASSES];
939 /* The number of registers that the function would need to save before it
940    uses them, and the number of fixed_regs.  Helpers for calculating
941    sched_class_regs_num.  */
942 static int call_saved_regs_num[N_REG_CLASSES];
943 static int fixed_regs_num[N_REG_CLASSES];
944 
945 /* Initiate register pressure relative info for scheduling the current
946    region.  Currently it only clears the registers mentioned in the
947    current region.  */
948 void
949 sched_init_region_reg_pressure_info (void)
950 {
951   bitmap_clear (region_ref_regs);
952 }
953 
954 /* PRESSURE[CL] describes the pressure on register class CL.  Update it
955    for the birth (if BIRTH_P) or death (if !BIRTH_P) of register REGNO.
956    LIVE tracks the set of live registers; if it is null, assume that
957    every birth or death is genuine.  */
958 static inline void
959 mark_regno_birth_or_death (bitmap live, int *pressure, int regno, bool birth_p)
960 {
961   enum reg_class pressure_class;
962 
963   pressure_class = sched_regno_pressure_class[regno];
964   if (regno >= FIRST_PSEUDO_REGISTER)
965     {
966       if (pressure_class != NO_REGS)
967 	{
968 	  if (birth_p)
969 	    {
970 	      if (!live || bitmap_set_bit (live, regno))
971 		pressure[pressure_class]
972 		  += (ira_reg_class_max_nregs
973 		      [pressure_class][PSEUDO_REGNO_MODE (regno)]);
974 	    }
975 	  else
976 	    {
977 	      if (!live || bitmap_clear_bit (live, regno))
978 		pressure[pressure_class]
979 		  -= (ira_reg_class_max_nregs
980 		      [pressure_class][PSEUDO_REGNO_MODE (regno)]);
981 	    }
982 	}
983     }
984   else if (pressure_class != NO_REGS
985 	   && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
986     {
987       if (birth_p)
988 	{
989 	  if (!live || bitmap_set_bit (live, regno))
990 	    pressure[pressure_class]++;
991 	}
992       else
993 	{
994 	  if (!live || bitmap_clear_bit (live, regno))
995 	    pressure[pressure_class]--;
996 	}
997     }
998 }
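
/* Worked example with a hypothetical target: for the birth of a pseudo
   whose pressure class needs two hard registers for its mode
   (ira_reg_class_max_nregs[cl][mode] == 2), the code above adds 2 to
   pressure[cl]; the death of a single allocatable hard register in that
   class subtracts exactly 1.  When LIVE is non-null, the bitmap update
   ensures each register is counted at most once per birth or death.  */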
999 
1000 /* Initiate current register pressure related info from living
1001    registers given by LIVE.  */
1002 static void
1003 initiate_reg_pressure_info (bitmap live)
1004 {
1005   int i;
1006   unsigned int j;
1007   bitmap_iterator bi;
1008 
1009   for (i = 0; i < ira_pressure_classes_num; i++)
1010     curr_reg_pressure[ira_pressure_classes[i]] = 0;
1011   bitmap_clear (curr_reg_live);
1012   EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
1013     if (sched_pressure == SCHED_PRESSURE_MODEL
1014 	|| current_nr_blocks == 1
1015 	|| bitmap_bit_p (region_ref_regs, j))
1016       mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure, j, true);
1017 }
1018 
1019 /* Mark registers in X as mentioned in the current region.  */
1020 static void
1021 setup_ref_regs (rtx x)
1022 {
1023   int i, j;
1024   const RTX_CODE code = GET_CODE (x);
1025   const char *fmt;
1026 
1027   if (REG_P (x))
1028     {
1029       bitmap_set_range (region_ref_regs, REGNO (x), REG_NREGS (x));
1030       return;
1031     }
1032   fmt = GET_RTX_FORMAT (code);
1033   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1034     if (fmt[i] == 'e')
1035       setup_ref_regs (XEXP (x, i));
1036     else if (fmt[i] == 'E')
1037       {
1038 	for (j = 0; j < XVECLEN (x, i); j++)
1039 	  setup_ref_regs (XVECEXP (x, i, j));
1040       }
1041 }
1042 
1043 /* Initiate current register pressure related info at the start of
1044    basic block BB.  */
1045 static void
1046 initiate_bb_reg_pressure_info (basic_block bb)
1047 {
1048   unsigned int i ATTRIBUTE_UNUSED;
1049   rtx_insn *insn;
1050 
1051   if (current_nr_blocks > 1)
1052     FOR_BB_INSNS (bb, insn)
1053       if (NONDEBUG_INSN_P (insn))
1054 	setup_ref_regs (PATTERN (insn));
1055   initiate_reg_pressure_info (df_get_live_in (bb));
1056   if (bb_has_eh_pred (bb))
1057     for (i = 0; ; ++i)
1058       {
1059 	unsigned int regno = EH_RETURN_DATA_REGNO (i);
1060 
1061 	if (regno == INVALID_REGNUM)
1062 	  break;
1063 	if (! bitmap_bit_p (df_get_live_in (bb), regno))
1064 	  mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
1065 				     regno, true);
1066       }
1067 }
1068 
1069 /* Save current register pressure related info.  */
1070 static void
1071 save_reg_pressure (void)
1072 {
1073   int i;
1074 
1075   for (i = 0; i < ira_pressure_classes_num; i++)
1076     saved_reg_pressure[ira_pressure_classes[i]]
1077       = curr_reg_pressure[ira_pressure_classes[i]];
1078   bitmap_copy (saved_reg_live, curr_reg_live);
1079 }
1080 
1081 /* Restore saved register pressure related info.  */
1082 static void
1083 restore_reg_pressure (void)
1084 {
1085   int i;
1086 
1087   for (i = 0; i < ira_pressure_classes_num; i++)
1088     curr_reg_pressure[ira_pressure_classes[i]]
1089       = saved_reg_pressure[ira_pressure_classes[i]];
1090   bitmap_copy (curr_reg_live, saved_reg_live);
1091 }
1092 
1093 /* Return TRUE if the register is dying after its USE.  */
1094 static bool
1095 dying_use_p (struct reg_use_data *use)
1096 {
1097   struct reg_use_data *next;
1098 
1099   for (next = use->next_regno_use; next != use; next = next->next_regno_use)
1100     if (NONDEBUG_INSN_P (next->insn)
1101 	&& QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
1102       return false;
1103   return true;
1104 }
1105 
1106 /* Print info about the current register pressure and its excess for
1107    each pressure class.  */
1108 static void
1109 print_curr_reg_pressure (void)
1110 {
1111   int i;
1112   enum reg_class cl;
1113 
1114   fprintf (sched_dump, ";;\t");
1115   for (i = 0; i < ira_pressure_classes_num; i++)
1116     {
1117       cl = ira_pressure_classes[i];
1118       gcc_assert (curr_reg_pressure[cl] >= 0);
1119       fprintf (sched_dump, "  %s:%d(%d)", reg_class_names[cl],
1120 	       curr_reg_pressure[cl],
1121 	       curr_reg_pressure[cl] - sched_class_regs_num[cl]);
1122     }
1123   fprintf (sched_dump, "\n");
1124 }
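
/* With hypothetical classes and numbers, this prints a line of the form

     ;;   GENERAL_REGS:10(2)  FP_REGS:3(-12)

   i.e. a current pressure of 10 exceeding the effective number of
   GENERAL_REGS by 2, and a pressure of 3 leaving 12 FP_REGS to spare.  */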
1125 
1126 /* Determine if INSN has a condition that is clobbered if a register
1127    in SET_REGS is modified.  */
1128 static bool
1129 cond_clobbered_p (rtx_insn *insn, HARD_REG_SET set_regs)
1130 {
1131   rtx pat = PATTERN (insn);
1132   gcc_assert (GET_CODE (pat) == COND_EXEC);
1133   if (TEST_HARD_REG_BIT (set_regs, REGNO (XEXP (COND_EXEC_TEST (pat), 0))))
1134     {
1135       sd_iterator_def sd_it;
1136       dep_t dep;
1137       haifa_change_pattern (insn, ORIG_PAT (insn));
1138       FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1139 	DEP_STATUS (dep) &= ~DEP_CANCELLED;
1140       TODO_SPEC (insn) = HARD_DEP;
1141       if (sched_verbose >= 2)
1142 	fprintf (sched_dump,
1143 		 ";;\t\tdequeue insn %s because of clobbered condition\n",
1144 		 (*current_sched_info->print_insn) (insn, 0));
1145       return true;
1146     }
1147 
1148   return false;
1149 }
1150 
1151 /* This function should be called after modifying the pattern of INSN,
1152    to update scheduler data structures as needed.  */
1153 static void
1154 update_insn_after_change (rtx_insn *insn)
1155 {
1156   sd_iterator_def sd_it;
1157   dep_t dep;
1158 
1159   dfa_clear_single_insn_cache (insn);
1160 
1161   sd_it = sd_iterator_start (insn,
1162 			     SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK);
1163   while (sd_iterator_cond (&sd_it, &dep))
1164     {
1165       DEP_COST (dep) = UNKNOWN_DEP_COST;
1166       sd_iterator_next (&sd_it);
1167     }
1168 
1169   /* Invalidate INSN_COST, so it'll be recalculated.  */
1170   INSN_COST (insn) = -1;
1171   /* Invalidate INSN_TICK, so it'll be recalculated.  */
1172   INSN_TICK (insn) = INVALID_TICK;
1173 
1174   /* Invalidate autoprefetch data entry.  */
1175   INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status
1176     = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
1177   INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status
1178     = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
1179 }
1180 
1181 
1182 /* Two VECs, one to hold dependencies for which pattern replacements
1183    need to be applied or restored at the start of the next cycle, and
1184    another to hold an integer that is either one, to apply the
1185    corresponding replacement, or zero to restore it.  */
1186 static vec<dep_t> next_cycle_replace_deps;
1187 static vec<int> next_cycle_apply;
1188 
1189 static void apply_replacement (dep_t, bool);
1190 static void restore_pattern (dep_t, bool);
1191 
1192 /* Look at the remaining dependencies for insn NEXT, and compute and return
1193    the TODO_SPEC value we should use for it.  This is called after one of
1194    NEXT's dependencies has been resolved.
1195    We also perform pattern replacements for predication, and for broken
1196    replacement dependencies.  The latter is only done if FOR_BACKTRACK is
1197    false.  */
1198 
1199 static ds_t
1200 recompute_todo_spec (rtx_insn *next, bool for_backtrack)
1201 {
1202   ds_t new_ds;
1203   sd_iterator_def sd_it;
1204   dep_t dep, modify_dep = NULL;
1205   int n_spec = 0;
1206   int n_control = 0;
1207   int n_replace = 0;
1208   bool first_p = true;
1209 
1210   if (sd_lists_empty_p (next, SD_LIST_BACK))
1211     /* NEXT has all its dependencies resolved.  */
1212     return 0;
1213 
1214   if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
1215     return HARD_DEP;
1216 
1217   /* If NEXT is intended to sit adjacent to this instruction, we don't
1218      want to try to break any dependencies.  Treat it as a HARD_DEP.  */
1219   if (SCHED_GROUP_P (next))
1220     return HARD_DEP;
1221 
1222   /* Now we've got NEXT with speculative deps only.
1223      1. Look at the deps to see what we have to do.
1224      2. Check if we can do 'todo'.  */
1225   new_ds = 0;
1226 
1227   FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
1228     {
1229       rtx_insn *pro = DEP_PRO (dep);
1230       ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
1231 
1232       if (DEBUG_INSN_P (pro) && !DEBUG_INSN_P (next))
1233 	continue;
1234 
1235       if (ds)
1236 	{
1237 	  n_spec++;
1238 	  if (first_p)
1239 	    {
1240 	      first_p = false;
1241 
1242 	      new_ds = ds;
1243 	    }
1244 	  else
1245 	    new_ds = ds_merge (new_ds, ds);
1246 	}
1247       else if (DEP_TYPE (dep) == REG_DEP_CONTROL)
1248 	{
1249 	  if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
1250 	    {
1251 	      n_control++;
1252 	      modify_dep = dep;
1253 	    }
1254 	  DEP_STATUS (dep) &= ~DEP_CANCELLED;
1255 	}
1256       else if (DEP_REPLACE (dep) != NULL)
1257 	{
1258 	  if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
1259 	    {
1260 	      n_replace++;
1261 	      modify_dep = dep;
1262 	    }
1263 	  DEP_STATUS (dep) &= ~DEP_CANCELLED;
1264 	}
1265     }
1266 
1267   if (n_replace > 0 && n_control == 0 && n_spec == 0)
1268     {
1269       if (!dbg_cnt (sched_breakdep))
1270 	return HARD_DEP;
1271       FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
1272 	{
1273 	  struct dep_replacement *desc = DEP_REPLACE (dep);
1274 	  if (desc != NULL)
1275 	    {
1276 	      if (desc->insn == next && !for_backtrack)
1277 		{
1278 		  gcc_assert (n_replace == 1);
1279 		  apply_replacement (dep, true);
1280 		}
1281 	      DEP_STATUS (dep) |= DEP_CANCELLED;
1282 	    }
1283 	}
1284       return 0;
1285     }
1286 
1287   else if (n_control == 1 && n_replace == 0 && n_spec == 0)
1288     {
1289       rtx_insn *pro, *other;
1290       rtx new_pat;
1291       rtx cond = NULL_RTX;
1292       bool success;
1293       rtx_insn *prev = NULL;
1294       int i;
1295       unsigned regno;
1296 
1297       if ((current_sched_info->flags & DO_PREDICATION) == 0
1298 	  || (ORIG_PAT (next) != NULL_RTX
1299 	      && PREDICATED_PAT (next) == NULL_RTX))
1300 	return HARD_DEP;
1301 
1302       pro = DEP_PRO (modify_dep);
1303       other = real_insn_for_shadow (pro);
1304       if (other != NULL_RTX)
1305 	pro = other;
1306 
1307       cond = sched_get_reverse_condition_uncached (pro);
1308       regno = REGNO (XEXP (cond, 0));
1309 
1310       /* Find the last scheduled insn that modifies the condition register.
1311 	 We can stop looking once we find the insn we depend on through the
1312 	 REG_DEP_CONTROL; if the condition register isn't modified after it,
1313 	 we know that it still has the right value.  */
1314       if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
1315 	FOR_EACH_VEC_ELT_REVERSE (scheduled_insns, i, prev)
1316 	  {
1317 	    HARD_REG_SET t;
1318 
1319 	    find_all_hard_reg_sets (prev, &t, true);
1320 	    if (TEST_HARD_REG_BIT (t, regno))
1321 	      return HARD_DEP;
1322 	    if (prev == pro)
1323 	      break;
1324 	  }
1325       if (ORIG_PAT (next) == NULL_RTX)
1326 	{
1327 	  ORIG_PAT (next) = PATTERN (next);
1328 
1329 	  new_pat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (next));
1330 	  success = haifa_change_pattern (next, new_pat);
1331 	  if (!success)
1332 	    return HARD_DEP;
1333 	  PREDICATED_PAT (next) = new_pat;
1334 	}
1335       else if (PATTERN (next) != PREDICATED_PAT (next))
1336 	{
1337 	  bool success = haifa_change_pattern (next,
1338 					       PREDICATED_PAT (next));
1339 	  gcc_assert (success);
1340 	}
1341       DEP_STATUS (modify_dep) |= DEP_CANCELLED;
1342       return DEP_CONTROL;
1343     }
1344 
1345   if (PREDICATED_PAT (next) != NULL_RTX)
1346     {
1347       int tick = INSN_TICK (next);
1348       bool success = haifa_change_pattern (next,
1349 					   ORIG_PAT (next));
1350       INSN_TICK (next) = tick;
1351       gcc_assert (success);
1352     }
1353 
1354   /* We can't handle the case where there are both speculative and control
1355      dependencies, so we return HARD_DEP in such a case.  Also fail if
1356      we have speculative dependencies with not enough points, or more than
1357      one control dependency.  */
1358   if ((n_spec > 0 && (n_control > 0 || n_replace > 0))
1359       || (n_spec > 0
1360 	  /* Too few points?  */
1361 	  && ds_weak (new_ds) < spec_info->data_weakness_cutoff)
1362       || n_control > 0
1363       || n_replace > 0)
1364     return HARD_DEP;
1365 
1366   return new_ds;
1367 }
1368 
1369 /* Pointer to the last instruction scheduled.  */
1370 static rtx_insn *last_scheduled_insn;
1371 
1372 /* Pointer to the last nondebug instruction scheduled within the
1373    block, or the prev_head of the scheduling block.  Used by
1374    rank_for_schedule, so that insns independent of the last scheduled
1375    insn will be preferred over dependent instructions.  */
1376 static rtx_insn *last_nondebug_scheduled_insn;
1377 
1378 /* Pointer that iterates through the list of unscheduled insns if we
1379    have a dbg_cnt enabled.  It always points at an insn prior to the
1380    first unscheduled one.  */
1381 static rtx_insn *nonscheduled_insns_begin;
1382 
1383 /* Compute cost of executing INSN.
1384    This is the number of cycles between instruction issue and
1385    instruction results.  */
1386 int
1387 insn_sched_cost (rtx_insn *insn)
1388 {
1389   int cost;
1390 
1391   if (sched_fusion)
1392     return 0;
1393 
1394   if (sel_sched_p ())
1395     {
1396       if (recog_memoized (insn) < 0)
1397 	return 0;
1398 
1399       cost = insn_default_latency (insn);
1400       if (cost < 0)
1401 	cost = 0;
1402 
1403       return cost;
1404     }
1405 
1406   cost = INSN_COST (insn);
1407 
1408   if (cost < 0)
1409     {
1410       /* A USE insn, or something else we don't need to
1411 	 understand.  We can't pass these directly to
1412 	 result_ready_cost or insn_default_latency because it will
1413 	 trigger a fatal error for unrecognizable insns.  */
1414       if (recog_memoized (insn) < 0)
1415 	{
1416 	  INSN_COST (insn) = 0;
1417 	  return 0;
1418 	}
1419       else
1420 	{
1421 	  cost = insn_default_latency (insn);
1422 	  if (cost < 0)
1423 	    cost = 0;
1424 
1425 	  INSN_COST (insn) = cost;
1426 	}
1427     }
1428 
1429   return cost;
1430 }
1431 
1432 /* Compute cost of dependence LINK.
1433    This is the number of cycles between instruction issue and
1434    instruction results.
1435    ??? We also use this function to call recog_memoized on all insns.  */
1436 int
1437 dep_cost_1 (dep_t link, dw_t dw)
1438 {
1439   rtx_insn *insn = DEP_PRO (link);
1440   rtx_insn *used = DEP_CON (link);
1441   int cost;
1442 
1443   if (DEP_COST (link) != UNKNOWN_DEP_COST)
1444     return DEP_COST (link);
1445 
1446   if (delay_htab)
1447     {
1448       struct delay_pair *delay_entry;
1449       delay_entry
1450 	= delay_htab_i2->find_with_hash (used, htab_hash_pointer (used));
1451       if (delay_entry)
1452 	{
1453 	  if (delay_entry->i1 == insn)
1454 	    {
1455 	      DEP_COST (link) = pair_delay (delay_entry);
1456 	      return DEP_COST (link);
1457 	    }
1458 	}
1459     }
1460 
1461   /* A USE insn should never require the value used to be computed.
1462      This allows the computation of a function's result and parameter
1463      values to overlap the return and call.  We don't care about the
1464      dependence cost when only decreasing register pressure.  */
1465   if (recog_memoized (used) < 0)
1466     {
1467       cost = 0;
1468       recog_memoized (insn);
1469     }
1470   else
1471     {
1472       enum reg_note dep_type = DEP_TYPE (link);
1473 
1474       cost = insn_sched_cost (insn);
1475 
1476       if (INSN_CODE (insn) >= 0)
1477 	{
1478 	  if (dep_type == REG_DEP_ANTI)
1479 	    cost = 0;
1480 	  else if (dep_type == REG_DEP_OUTPUT)
1481 	    {
1482 	      cost = (insn_default_latency (insn)
1483 		      - insn_default_latency (used));
1484 	      if (cost <= 0)
1485 		cost = 1;
1486 	    }
1487 	  else if (bypass_p (insn))
1488 	    cost = insn_latency (insn, used);
1489 	}
1490 
1491 
1492       if (targetm.sched.adjust_cost)
1493 	cost = targetm.sched.adjust_cost (used, (int) dep_type, insn, cost,
1494 					  dw);
1495 
1496       if (cost < 0)
1497 	cost = 0;
1498     }
1499 
1500   DEP_COST (link) = cost;
1501   return cost;
1502 }
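
/* Worked example with hypothetical latencies: for an output dependence
   whose producer has a default latency of 4 and whose consumer has a
   default latency of 1, the code above yields a cost of 4 - 1 = 3 cycles,
   so the later write lands after the earlier one; an anti dependence costs
   0 unless the target's adjust_cost hook raises it.  */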
1503 
1504 /* Compute cost of dependence LINK.
1505    This is the number of cycles between instruction issue and
1506    instruction results.  */
1507 int
1508 dep_cost (dep_t link)
1509 {
1510   return dep_cost_1 (link, 0);
1511 }
1512 
1513 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
1514    INSN_PRIORITY explicitly.  */
1515 void
1516 increase_insn_priority (rtx_insn *insn, int amount)
1517 {
1518   if (!sel_sched_p ())
1519     {
1520       /* We're dealing with haifa-sched.c INSN_PRIORITY.  */
1521       if (INSN_PRIORITY_KNOWN (insn))
1522 	  INSN_PRIORITY (insn) += amount;
1523     }
1524   else
1525     {
1526       /* In sel-sched.c INSN_PRIORITY is not kept up to date.
1527 	 Use EXPR_PRIORITY instead. */
1528       sel_add_to_insn_priority (insn, amount);
1529     }
1530 }
1531 
1532 /* Return 'true' if DEP should be included in priority calculations.  */
1533 static bool
1534 contributes_to_priority_p (dep_t dep)
1535 {
1536   if (DEBUG_INSN_P (DEP_CON (dep))
1537       || DEBUG_INSN_P (DEP_PRO (dep)))
1538     return false;
1539 
1540   /* Critical path is meaningful in block boundaries only.  */
1541   if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
1542 						    DEP_PRO (dep)))
1543     return false;
1544 
1545   if (DEP_REPLACE (dep) != NULL)
1546     return false;
1547 
1548   /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
1549      then speculative instructions will be less likely to be
1550      scheduled.  That is because the priority of
1551      their producers will increase, and, thus, the
1552      producers will more likely be scheduled, thus,
1553      resolving the dependence.  */
1554   if (sched_deps_info->generate_spec_deps
1555       && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
1556       && (DEP_STATUS (dep) & SPECULATIVE))
1557     return false;
1558 
1559   return true;
1560 }
1561 
1562 /* Compute the number of nondebug deps in list LIST for INSN.  */
1563 
1564 static int
1565 dep_list_size (rtx_insn *insn, sd_list_types_def list)
1566 {
1567   sd_iterator_def sd_it;
1568   dep_t dep;
1569   int dbgcount = 0, nodbgcount = 0;
1570 
1571   if (!MAY_HAVE_DEBUG_INSNS)
1572     return sd_lists_size (insn, list);
1573 
1574   FOR_EACH_DEP (insn, list, sd_it, dep)
1575     {
1576       if (DEBUG_INSN_P (DEP_CON (dep)))
1577 	dbgcount++;
1578       else if (!DEBUG_INSN_P (DEP_PRO (dep)))
1579 	nodbgcount++;
1580     }
1581 
1582   gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, list));
1583 
1584   return nodbgcount;
1585 }
1586 
1587 bool sched_fusion;
1588 
1589 /* Compute the priority number for INSN.  */
1590 static int
1591 priority (rtx_insn *insn, bool force_recompute)
1592 {
1593   if (! INSN_P (insn))
1594     return 0;
1595 
1596   /* We should not be interested in priority of an already scheduled insn.  */
1597   gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
1598 
1599   if (force_recompute || !INSN_PRIORITY_KNOWN (insn))
1600     {
1601       int this_priority = -1;
1602 
1603       if (sched_fusion)
1604 	{
1605 	  int this_fusion_priority;
1606 
1607 	  targetm.sched.fusion_priority (insn, FUSION_MAX_PRIORITY,
1608 					 &this_fusion_priority, &this_priority);
1609 	  INSN_FUSION_PRIORITY (insn) = this_fusion_priority;
1610 	}
1611       else if (dep_list_size (insn, SD_LIST_FORW) == 0)
1612 	/* ??? We should set INSN_PRIORITY to insn_sched_cost when an insn
1613 	   has some forward deps but all of them are ignored by
1614 	   contributes_to_priority hook.  At the moment we set priority of
1615 	   such insn to 0.  */
1616 	this_priority = insn_sched_cost (insn);
1617       else
1618 	{
1619 	  rtx_insn *prev_first, *twin;
1620 	  basic_block rec;
1621 
1622 	  /* For recovery check instructions we calculate priority slightly
1623 	     differently than for normal instructions.  Instead of walking
1624 	     through INSN_FORW_DEPS (check) list, we walk through
1625 	     INSN_FORW_DEPS list of each instruction in the corresponding
1626 	     recovery block.  */
1627 
1628           /* Selective scheduling does not define RECOVERY_BLOCK macro.  */
1629 	  rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
1630 	  if (!rec || rec == EXIT_BLOCK_PTR_FOR_FN (cfun))
1631 	    {
1632 	      prev_first = PREV_INSN (insn);
1633 	      twin = insn;
1634 	    }
1635 	  else
1636 	    {
1637 	      prev_first = NEXT_INSN (BB_HEAD (rec));
1638 	      twin = PREV_INSN (BB_END (rec));
1639 	    }
1640 
1641 	  do
1642 	    {
1643 	      sd_iterator_def sd_it;
1644 	      dep_t dep;
1645 
1646 	      FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
1647 		{
1648 		  rtx_insn *next;
1649 		  int next_priority;
1650 
1651 		  next = DEP_CON (dep);
1652 
1653 		  if (BLOCK_FOR_INSN (next) != rec)
1654 		    {
1655 		      int cost;
1656 
1657 		      if (!contributes_to_priority_p (dep))
1658 			continue;
1659 
1660 		      if (twin == insn)
1661 			cost = dep_cost (dep);
1662 		      else
1663 			{
1664 			  struct _dep _dep1, *dep1 = &_dep1;
1665 
1666 			  init_dep (dep1, insn, next, REG_DEP_ANTI);
1667 
1668 			  cost = dep_cost (dep1);
1669 			}
1670 
1671 		      next_priority = cost + priority (next);
1672 
1673 		      if (next_priority > this_priority)
1674 			this_priority = next_priority;
1675 		    }
1676 		}
1677 
1678 	      twin = PREV_INSN (twin);
1679 	    }
1680 	  while (twin != prev_first);
1681 	}
1682 
1683       if (this_priority < 0)
1684 	{
1685 	  gcc_assert (this_priority == -1);
1686 
1687 	  this_priority = insn_sched_cost (insn);
1688 	}
1689 
1690       INSN_PRIORITY (insn) = this_priority;
1691       INSN_PRIORITY_STATUS (insn) = 1;
1692     }
1693 
1694   return INSN_PRIORITY (insn);
1695 }
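
/* Illustration only (not part of this file's build): the core of the
   priority computation above is a longest-path walk over the forward
   dependence DAG, memoized per insn.  A minimal standalone sketch on a
   hypothetical adjacency-matrix DAG; latency[] and dep[][] are made-up
   inputs, and the insn's own cost is used as a floor rather than the exact
   fallback rules of priority ().  */
#if 0
#include <stdio.h>

#define N 4

static int latency[N] = { 3, 1, 1, 2 };	/* insn_sched_cost analogue.  */
/* dep[i][j] >= 0 is the cost of the dependence i -> j, -1 means no dep.  */
static int dep[N][N] = {
  { -1,  3,  3, -1 },
  { -1, -1, -1,  1 },
  { -1, -1, -1,  1 },
  { -1, -1, -1, -1 },
};
static int prio[N], known[N];

static int
toy_priority (int i)
{
  if (!known[i])
    {
      int best = latency[i], j;	/* Leaf: just the insn's own cost.  */
      for (j = 0; j < N; j++)
	if (dep[i][j] >= 0)
	  {
	    int p = dep[i][j] + toy_priority (j);
	    if (p > best)
	      best = p;
	  }
      prio[i] = best;
      known[i] = 1;
    }
  return prio[i];
}

int
main (void)
{
  int i;
  for (i = 0; i < N; i++)
    printf ("priority(%d) = %d\n", i, toy_priority (i));
  /* Insn 0 feeds 1 and 2 with cost 3; both feed 3 with cost 1:
     priority(3) = 2, priority(1) = priority(2) = 3, priority(0) = 6.  */
  return 0;
}
#endif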
1696 
1697 /* Macros and functions for keeping the priority queue sorted, and
1698    dealing with queuing and dequeuing of instructions.  */
1699 
1700 /* For each pressure class CL, set DEATH[CL] to the number of registers
1701    in that class that die in INSN.  */
1702 
1703 static void
1704 calculate_reg_deaths (rtx_insn *insn, int *death)
1705 {
1706   int i;
1707   struct reg_use_data *use;
1708 
1709   for (i = 0; i < ira_pressure_classes_num; i++)
1710     death[ira_pressure_classes[i]] = 0;
1711   for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1712     if (dying_use_p (use))
1713       mark_regno_birth_or_death (0, death, use->regno, true);
1714 }
1715 
1716 /* Setup info about the current register pressure impact of scheduling
1717    INSN at the current scheduling point.  */
1718 static void
1719 setup_insn_reg_pressure_info (rtx_insn *insn)
1720 {
1721   int i, change, before, after, hard_regno;
1722   int excess_cost_change;
1723   machine_mode mode;
1724   enum reg_class cl;
1725   struct reg_pressure_data *pressure_info;
1726   int *max_reg_pressure;
1727   static int death[N_REG_CLASSES];
1728 
1729   gcc_checking_assert (!DEBUG_INSN_P (insn));
1730 
1731   excess_cost_change = 0;
1732   calculate_reg_deaths (insn, death);
1733   pressure_info = INSN_REG_PRESSURE (insn);
1734   max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
1735   gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
1736   for (i = 0; i < ira_pressure_classes_num; i++)
1737     {
1738       cl = ira_pressure_classes[i];
1739       gcc_assert (curr_reg_pressure[cl] >= 0);
1740       change = (int) pressure_info[i].set_increase - death[cl];
1741       before = MAX (0, max_reg_pressure[i] - sched_class_regs_num[cl]);
1742       after = MAX (0, max_reg_pressure[i] + change
1743 		   - sched_class_regs_num[cl]);
1744       hard_regno = ira_class_hard_regs[cl][0];
1745       gcc_assert (hard_regno >= 0);
1746       mode = reg_raw_mode[hard_regno];
1747       excess_cost_change += ((after - before)
1748 			     * (ira_memory_move_cost[mode][cl][0]
1749 				+ ira_memory_move_cost[mode][cl][1]));
1750     }
1751   INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
1752 }
1753 
1754 /* This is the first page of code related to SCHED_PRESSURE_MODEL.
1755    It tries to make the scheduler take register pressure into account
1756    without introducing too many unnecessary stalls.  It hooks into the
1757    main scheduling algorithm at several points:
1758 
1759     - Before scheduling starts, model_start_schedule constructs a
1760       "model schedule" for the current block.  This model schedule is
1761       chosen solely to keep register pressure down.  It does not take the
1762       target's pipeline or the original instruction order into account,
1763       except as a tie-breaker.  It also doesn't work to a particular
1764       pressure limit.
1765 
1766       This model schedule gives us an idea of what pressure can be
1767       achieved for the block and gives us an example of a schedule that
1768       keeps to that pressure.  It also makes the final schedule less
1769       dependent on the original instruction order.  This is important
1770       because the original order can either be "wide" (many values live
1771       at once, such as in user-scheduled code) or "narrow" (few values
1772       live at once, such as after loop unrolling, where several
1773       iterations are executed sequentially).
1774 
1775       We do not apply this model schedule to the rtx stream.  We simply
1776       record it in model_schedule.  We also compute the maximum pressure,
1777       MP, that was seen during this schedule.
1778 
1779     - Instructions are added to the ready queue even if they require
1780       a stall.  The length of the stall is instead computed as:
1781 
1782 	 MAX (INSN_TICK (INSN) - clock_var, 0)
1783 
1784       (= insn_delay).  This allows rank_for_schedule to choose between
1785       introducing a deliberate stall or increasing pressure.
1786 
1787     - Before sorting the ready queue, model_set_excess_costs assigns
1788       a pressure-based cost to each ready instruction in the queue.
1789       This is the instruction's INSN_REG_PRESSURE_EXCESS_COST_CHANGE
1790       (ECC for short) and is effectively measured in cycles.
1791 
1792     - rank_for_schedule ranks instructions based on:
1793 
1794 	ECC (insn) + insn_delay (insn)
1795 
1796       then as:
1797 
1798 	insn_delay (insn)
1799 
1800       So, for example, an instruction X1 with an ECC of 1 that can issue
1801       now will win over an instruction X0 with an ECC of zero that would
1802       introduce a stall of one cycle.  However, an instruction X2 with an
1803       ECC of 2 that can issue now will lose to both X0 and X1.
1804 
1805     - When an instruction is scheduled, model_recompute updates the model
1806       schedule with the new pressures (some of which might now exceed the
1807       original maximum pressure MP).  model_update_limit_points then searches
1808       for the new point of maximum pressure, if not already known.  */
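
/* Illustration only (not part of this file's build): a standalone replay of
   the two-level rank described above -- first ECC (insn) + insn_delay (insn),
   then insn_delay (insn) -- on the X0/X1/X2 example from the comment.  The
   toy_ready struct and its values are made up.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct toy_ready { const char *name; int ecc; int delay; };

static int
toy_rank (const void *a, const void *b)
{
  const struct toy_ready *x = (const struct toy_ready *) a;
  const struct toy_ready *y = (const struct toy_ready *) b;
  int kx = x->ecc + x->delay, ky = y->ecc + y->delay;

  if (kx != ky)
    return kx - ky;		/* Smaller ECC + delay wins.  */
  return x->delay - y->delay;	/* Then prefer the insn that can issue now.  */
}

int
main (void)
{
  struct toy_ready r[3] = {
    { "X0", 0, 1 }, { "X1", 1, 0 }, { "X2", 2, 0 }
  };
  qsort (r, 3, sizeof r[0], toy_rank);
  /* Expected order: X1, X0, X2 -- matching the comment above.  */
  printf ("%s %s %s\n", r[0].name, r[1].name, r[2].name);
  return 0;
}
#endif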
1809 
1810 /* Used to separate high-verbosity debug information for SCHED_PRESSURE_MODEL
1811    from surrounding debug information.  */
1812 #define MODEL_BAR \
1813   ";;\t\t+------------------------------------------------------\n"
1814 
1815 /* Information about the pressure on a particular register class at a
1816    particular point of the model schedule.  */
1817 struct model_pressure_data {
1818   /* The pressure at this point of the model schedule, or -1 if the
1819      point is associated with an instruction that has already been
1820      scheduled.  */
1821   int ref_pressure;
1822 
1823   /* The maximum pressure during or after this point of the model schedule.  */
1824   int max_pressure;
1825 };
1826 
1827 /* Per-instruction information that is used while building the model
1828    schedule.  Here, "schedule" refers to the model schedule rather
1829    than the main schedule.  */
1830 struct model_insn_info {
1831   /* The instruction itself.  */
1832   rtx_insn *insn;
1833 
1834   /* If this instruction is in model_worklist, these fields link to the
1835      previous (higher-priority) and next (lower-priority) instructions
1836      in the list.  */
1837   struct model_insn_info *prev;
1838   struct model_insn_info *next;
1839 
1840   /* While constructing the schedule, QUEUE_INDEX describes whether an
1841      instruction has already been added to the schedule (QUEUE_SCHEDULED),
1842      is in model_worklist (QUEUE_READY), or neither (QUEUE_NOWHERE).
1843      old_queue records the value that QUEUE_INDEX had before scheduling
1844      started, so that we can restore it once the schedule is complete.  */
1845   int old_queue;
1846 
1847   /* The relative importance of an unscheduled instruction.  Higher
1848      values indicate greater importance.  */
1849   unsigned int model_priority;
1850 
1851   /* The length of the longest path of satisfied true dependencies
1852      that leads to this instruction.  */
1853   unsigned int depth;
1854 
1855   /* The length of the longest path of dependencies of any kind
1856      that leads from this instruction.  */
1857   unsigned int alap;
1858 
1859   /* The number of predecessor nodes that must still be scheduled.  */
1860   int unscheduled_preds;
1861 };
1862 
1863 /* Information about the pressure limit for a particular register class.
1864    This structure is used when applying a model schedule to the main
1865    schedule.  */
1866 struct model_pressure_limit {
1867   /* The maximum register pressure seen in the original model schedule.  */
1868   int orig_pressure;
1869 
1870   /* The maximum register pressure seen in the current model schedule
1871      (which excludes instructions that have already been scheduled).  */
1872   int pressure;
1873 
1874   /* The point of the current model schedule at which PRESSURE is first
1875      reached.  It is set to -1 if the value needs to be recomputed.  */
1876   int point;
1877 };
1878 
1879 /* Describes a particular way of measuring register pressure.  */
1880 struct model_pressure_group {
1881   /* Index PCI describes the maximum pressure on ira_pressure_classes[PCI].  */
1882   struct model_pressure_limit limits[N_REG_CLASSES];
1883 
1884   /* Index (POINT * ira_num_pressure_classes + PCI) describes the pressure
1885      on register class ira_pressure_classes[PCI] at point POINT of the
1886      current model schedule.  A POINT of model_num_insns describes the
1887      pressure at the end of the schedule.  */
1888   struct model_pressure_data *model;
1889 };
1890 
1891 /* Index POINT gives the instruction at point POINT of the model schedule.
1892    This array doesn't change during main scheduling.  */
1893 static vec<rtx_insn *> model_schedule;
1894 
1895 /* The list of instructions in the model worklist, sorted in order of
1896    decreasing priority.  */
1897 static struct model_insn_info *model_worklist;
1898 
1899 /* Index I describes the instruction with INSN_LUID I.  */
1900 static struct model_insn_info *model_insns;
1901 
1902 /* The number of instructions in the model schedule.  */
1903 static int model_num_insns;
1904 
1905 /* The index of the first instruction in model_schedule that hasn't yet been
1906    added to the main schedule, or model_num_insns if all of them have.  */
1907 static int model_curr_point;
1908 
1909 /* Describes the pressure before each instruction in the model schedule.  */
1910 static struct model_pressure_group model_before_pressure;
1911 
1912 /* The first unused model_priority value (as used in model_insn_info).  */
1913 static unsigned int model_next_priority;
1914 
1915 
1916 /* The model_pressure_data for ira_pressure_classes[PCI] in GROUP
1917    at point POINT of the model schedule.  */
1918 #define MODEL_PRESSURE_DATA(GROUP, POINT, PCI) \
1919   (&(GROUP)->model[(POINT) * ira_pressure_classes_num + (PCI)])
1920 
1921 /* The maximum pressure on ira_pressure_classes[PCI] in GROUP at or
1922    after point POINT of the model schedule.  */
1923 #define MODEL_MAX_PRESSURE(GROUP, POINT, PCI) \
1924   (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->max_pressure)
1925 
1926 /* The pressure on ira_pressure_classes[PCI] in GROUP at point POINT
1927    of the model schedule.  */
1928 #define MODEL_REF_PRESSURE(GROUP, POINT, PCI) \
1929   (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->ref_pressure)
1930 
1931 /* Information about INSN that is used when creating the model schedule.  */
1932 #define MODEL_INSN_INFO(INSN) \
1933   (&model_insns[INSN_LUID (INSN)])
1934 
1935 /* The instruction at point POINT of the model schedule.  */
1936 #define MODEL_INSN(POINT) \
1937   (model_schedule[POINT])
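
/* Illustration only (not part of this file's build): the model[] array
   indexed by MODEL_PRESSURE_DATA is a (model_num_insns + 1) by
   ira_pressure_classes_num matrix stored row-major, where the extra row
   holds the pressure at the end of the schedule.  A minimal standalone
   sketch of that indexing with made-up sizes and plain ints instead of
   struct model_pressure_data.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  int num_insns = 3, num_classes = 2;
  /* One row per schedule point, plus one row for "end of schedule".  */
  int *model = calloc ((num_insns + 1) * num_classes, sizeof (int));

  int point = 2, pci = 1;
  model[point * num_classes + pci] = 7;	/* MODEL_REF_PRESSURE analogue.  */
  printf ("%d\n", model[point * num_classes + pci]);

  free (model);
  return 0;
}
#endif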
1938 
1939 
1940 /* Return INSN's index in the model schedule, or model_num_insns if it
1941    doesn't belong to that schedule.  */
1942 
1943 static int
1944 model_index (rtx_insn *insn)
1945 {
1946   if (INSN_MODEL_INDEX (insn) == 0)
1947     return model_num_insns;
1948   return INSN_MODEL_INDEX (insn) - 1;
1949 }
1950 
1951 /* Make sure that GROUP->limits is up-to-date for the current point
1952    of the model schedule.  */
1953 
1954 static void
1955 model_update_limit_points_in_group (struct model_pressure_group *group)
1956 {
1957   int pci, max_pressure, point;
1958 
1959   for (pci = 0; pci < ira_pressure_classes_num; pci++)
1960     {
1961       /* We may have passed the final point at which the pressure in
1962 	 group->limits[pci].pressure was reached.  Update the limit if so.  */
1963       max_pressure = MODEL_MAX_PRESSURE (group, model_curr_point, pci);
1964       group->limits[pci].pressure = max_pressure;
1965 
1966       /* Find the point at which MAX_PRESSURE is first reached.  We need
1967 	 to search in three cases:
1968 
1969 	 - We've already moved past the previous pressure point.
1970 	   In this case we search forward from model_curr_point.
1971 
1972 	 - We scheduled the previous point of maximum pressure ahead of
1973 	   its position in the model schedule, but doing so didn't bring
1974 	   the pressure point earlier.  In this case we search forward
1975 	   from that previous pressure point.
1976 
1977 	 - Scheduling an instruction early caused the maximum pressure
1978 	   to decrease.  In this case we will have set the pressure
1979 	   point to -1, and we search forward from model_curr_point.  */
1980       point = MAX (group->limits[pci].point, model_curr_point);
1981       while (point < model_num_insns
1982 	     && MODEL_REF_PRESSURE (group, point, pci) < max_pressure)
1983 	point++;
1984       group->limits[pci].point = point;
1985 
1986       gcc_assert (MODEL_REF_PRESSURE (group, point, pci) == max_pressure);
1987       gcc_assert (MODEL_MAX_PRESSURE (group, point, pci) == max_pressure);
1988     }
1989 }
1990 
1991 /* Make sure that all register-pressure limits are up-to-date for the
1992    current position in the model schedule.  */
1993 
1994 static void
1995 model_update_limit_points (void)
1996 {
1997   model_update_limit_points_in_group (&model_before_pressure);
1998 }
1999 
2000 /* Return the model_index of the last unscheduled use in chain USE
2001    outside of USE's instruction.  Return -1 if there are no other uses,
2002    or model_num_insns if the register is live at the end of the block.  */
2003 
2004 static int
2005 model_last_use_except (struct reg_use_data *use)
2006 {
2007   struct reg_use_data *next;
2008   int last, index;
2009 
2010   last = -1;
2011   for (next = use->next_regno_use; next != use; next = next->next_regno_use)
2012     if (NONDEBUG_INSN_P (next->insn)
2013 	&& QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
2014       {
2015 	index = model_index (next->insn);
2016 	if (index == model_num_insns)
2017 	  return model_num_insns;
2018 	if (last < index)
2019 	  last = index;
2020       }
2021   return last;
2022 }
2023 
2024 /* An instruction with model_index POINT has just been scheduled, and it
2025    adds DELTA to the pressure on ira_pressure_classes[PCI] after POINT - 1.
2026    Update MODEL_REF_PRESSURE (GROUP, POINT, PCI) and
2027    MODEL_MAX_PRESSURE (GROUP, POINT, PCI) accordingly.  */
2028 
2029 static void
2030 model_start_update_pressure (struct model_pressure_group *group,
2031 			     int point, int pci, int delta)
2032 {
2033   int next_max_pressure;
2034 
2035   if (point == model_num_insns)
2036     {
2037       /* The instruction wasn't part of the model schedule; it was moved
2038 	 from a different block.  Update the pressure for the end of
2039 	 the model schedule.  */
2040       MODEL_REF_PRESSURE (group, point, pci) += delta;
2041       MODEL_MAX_PRESSURE (group, point, pci) += delta;
2042     }
2043   else
2044     {
2045       /* Record that this instruction has been scheduled.  Nothing now
2046 	 changes between POINT and POINT + 1, so get the maximum pressure
2047 	 from the latter.  If the maximum pressure decreases, the new
2048 	 pressure point may be before POINT.  */
2049       MODEL_REF_PRESSURE (group, point, pci) = -1;
2050       next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2051       if (MODEL_MAX_PRESSURE (group, point, pci) > next_max_pressure)
2052 	{
2053 	  MODEL_MAX_PRESSURE (group, point, pci) = next_max_pressure;
2054 	  if (group->limits[pci].point == point)
2055 	    group->limits[pci].point = -1;
2056 	}
2057     }
2058 }
2059 
2060 /* Record that scheduling a later instruction has changed the pressure
2061    at point POINT of the model schedule by DELTA (which might be 0).
2062    Update GROUP accordingly.  Return nonzero if these changes might
2063    trigger changes to previous points as well.  */
2064 
2065 static int
2066 model_update_pressure (struct model_pressure_group *group,
2067 		       int point, int pci, int delta)
2068 {
2069   int ref_pressure, max_pressure, next_max_pressure;
2070 
2071   /* If POINT hasn't yet been scheduled, update its pressure.  */
2072   ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
2073   if (ref_pressure >= 0 && delta != 0)
2074     {
2075       ref_pressure += delta;
2076       MODEL_REF_PRESSURE (group, point, pci) = ref_pressure;
2077 
2078       /* Check whether the maximum pressure in the overall schedule
2079 	 has increased.  (This means that the MODEL_MAX_PRESSURE of
2080 	 every point <= POINT will need to increase too; see below.)  */
2081       if (group->limits[pci].pressure < ref_pressure)
2082 	group->limits[pci].pressure = ref_pressure;
2083 
2084       /* If we are at maximum pressure, and the maximum pressure
2085 	 point was previously unknown or later than POINT,
2086 	 bring it forward.  */
2087       if (group->limits[pci].pressure == ref_pressure
2088 	  && !IN_RANGE (group->limits[pci].point, 0, point))
2089 	group->limits[pci].point = point;
2090 
2091       /* If POINT used to be the point of maximum pressure, but isn't
2092 	 any longer, we need to recalculate it using a forward walk.  */
2093       if (group->limits[pci].pressure > ref_pressure
2094 	  && group->limits[pci].point == point)
2095 	group->limits[pci].point = -1;
2096     }
2097 
2098   /* Update the maximum pressure at POINT.  Changes here might also
2099      affect the maximum pressure at POINT - 1.  */
2100   next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2101   max_pressure = MAX (ref_pressure, next_max_pressure);
2102   if (MODEL_MAX_PRESSURE (group, point, pci) != max_pressure)
2103     {
2104       MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
2105       return 1;
2106     }
2107   return 0;
2108 }
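
/* Illustration only (not part of this file's build): MODEL_MAX_PRESSURE is
   maintained as a suffix maximum of MODEL_REF_PRESSURE, so a change at one
   point only needs to propagate backwards while the maximum actually
   changes (the MIX test in model_recompute).  A minimal standalone sketch
   with made-up arrays; the split into model_start_update_pressure /
   model_update_pressure and the limit-point tracking are left out.  */
#if 0
#include <stdio.h>

#define NPOINTS 5

static int ref_pressure[NPOINTS] = { 2, 4, 3, 5, 1 };
static int max_pressure[NPOINTS];

static void
toy_init_suffix_max (void)
{
  int p;
  max_pressure[NPOINTS - 1] = ref_pressure[NPOINTS - 1];
  for (p = NPOINTS - 2; p >= 0; p--)
    max_pressure[p] = (ref_pressure[p] > max_pressure[p + 1]
		       ? ref_pressure[p] : max_pressure[p + 1]);
}

/* Add DELTA at POINT and propagate the suffix maximum backwards,
   stopping as soon as nothing changes.  */
static void
toy_update (int point, int delta)
{
  int p;
  ref_pressure[point] += delta;
  for (p = point; p >= 0; p--)
    {
      int next_max = (p + 1 < NPOINTS ? max_pressure[p + 1] : 0);
      int new_max = (ref_pressure[p] > next_max ? ref_pressure[p] : next_max);
      if (new_max == max_pressure[p])
	break;
      max_pressure[p] = new_max;
    }
}

int
main (void)
{
  int p;
  toy_init_suffix_max ();
  toy_update (2, 4);		/* ref[2]: 3 -> 7.  */
  for (p = 0; p < NPOINTS; p++)
    printf ("point %d: ref %d max %d\n", p, ref_pressure[p], max_pressure[p]);
  /* max_pressure becomes 7 7 7 5 1.  */
  return 0;
}
#endif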
2109 
2110 /* INSN has just been scheduled.  Update the model schedule accordingly.  */
2111 
2112 static void
2113 model_recompute (rtx_insn *insn)
2114 {
2115   struct {
2116     int last_use;
2117     int regno;
2118   } uses[FIRST_PSEUDO_REGISTER + MAX_RECOG_OPERANDS];
2119   struct reg_use_data *use;
2120   struct reg_pressure_data *reg_pressure;
2121   int delta[N_REG_CLASSES];
2122   int pci, point, mix, new_last, cl, ref_pressure, queue;
2123   unsigned int i, num_uses, num_pending_births;
2124   bool print_p;
2125 
2126   /* The destinations of INSN were previously live from POINT onwards, but are
2127      now live from model_curr_point onwards.  Set up DELTA accordingly.  */
2128   point = model_index (insn);
2129   reg_pressure = INSN_REG_PRESSURE (insn);
2130   for (pci = 0; pci < ira_pressure_classes_num; pci++)
2131     {
2132       cl = ira_pressure_classes[pci];
2133       delta[cl] = reg_pressure[pci].set_increase;
2134     }
2135 
2136   /* Record which registers previously died at POINT, but which now die
2137      before POINT.  Adjust DELTA so that it represents the effect of
2138      this change after POINT - 1.  Set NUM_PENDING_BIRTHS to the number of
2139      registers that will be born in the range [model_curr_point, POINT).  */
2140   num_uses = 0;
2141   num_pending_births = 0;
2142   bitmap_clear (tmp_bitmap);
2143   for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2144     {
2145       new_last = model_last_use_except (use);
2146       if (new_last < point && bitmap_set_bit (tmp_bitmap, use->regno))
2147 	{
2148 	  gcc_assert (num_uses < ARRAY_SIZE (uses));
2149 	  uses[num_uses].last_use = new_last;
2150 	  uses[num_uses].regno = use->regno;
2151 	  /* This register is no longer live after POINT - 1.  */
2152 	  mark_regno_birth_or_death (NULL, delta, use->regno, false);
2153 	  num_uses++;
2154 	  if (new_last >= 0)
2155 	    num_pending_births++;
2156 	}
2157     }
2158 
2159   /* Update the MODEL_REF_PRESSURE and MODEL_MAX_PRESSURE for POINT.
2160      Also set each group pressure limit for POINT.  */
2161   for (pci = 0; pci < ira_pressure_classes_num; pci++)
2162     {
2163       cl = ira_pressure_classes[pci];
2164       model_start_update_pressure (&model_before_pressure,
2165 				   point, pci, delta[cl]);
2166     }
2167 
2168   /* Walk the model schedule backwards, starting immediately before POINT.  */
2169   print_p = false;
2170   if (point != model_curr_point)
2171     do
2172       {
2173 	point--;
2174 	insn = MODEL_INSN (point);
2175 	queue = QUEUE_INDEX (insn);
2176 
2177 	if (queue != QUEUE_SCHEDULED)
2178 	  {
2179 	    /* DELTA describes the effect of the move on the register pressure
2180 	       after POINT.  Make it describe the effect on the pressure
2181 	       before POINT.  */
2182 	    i = 0;
2183 	    while (i < num_uses)
2184 	      {
2185 		if (uses[i].last_use == point)
2186 		  {
2187 		    /* This register is now live again.  */
2188 		    mark_regno_birth_or_death (NULL, delta,
2189 					       uses[i].regno, true);
2190 
2191 		    /* Remove this use from the array.  */
2192 		    uses[i] = uses[num_uses - 1];
2193 		    num_uses--;
2194 		    num_pending_births--;
2195 		  }
2196 		else
2197 		  i++;
2198 	      }
2199 
2200 	    if (sched_verbose >= 5)
2201 	      {
2202 		if (!print_p)
2203 		  {
2204 		    fprintf (sched_dump, MODEL_BAR);
2205 		    fprintf (sched_dump, ";;\t\t| New pressure for model"
2206 			     " schedule\n");
2207 		    fprintf (sched_dump, MODEL_BAR);
2208 		    print_p = true;
2209 		  }
2210 
2211 		fprintf (sched_dump, ";;\t\t| %3d %4d %-30s ",
2212 			 point, INSN_UID (insn),
2213 			 str_pattern_slim (PATTERN (insn)));
2214 		for (pci = 0; pci < ira_pressure_classes_num; pci++)
2215 		  {
2216 		    cl = ira_pressure_classes[pci];
2217 		    ref_pressure = MODEL_REF_PRESSURE (&model_before_pressure,
2218 						       point, pci);
2219 		    fprintf (sched_dump, " %s:[%d->%d]",
2220 			     reg_class_names[ira_pressure_classes[pci]],
2221 			     ref_pressure, ref_pressure + delta[cl]);
2222 		  }
2223 		fprintf (sched_dump, "\n");
2224 	      }
2225 	  }
2226 
2227 	/* Adjust the pressure at POINT.  Set MIX to nonzero if POINT - 1
2228 	   might have changed as well.  */
2229 	mix = num_pending_births;
2230 	for (pci = 0; pci < ira_pressure_classes_num; pci++)
2231 	  {
2232 	    cl = ira_pressure_classes[pci];
2233 	    mix |= delta[cl];
2234 	    mix |= model_update_pressure (&model_before_pressure,
2235 					  point, pci, delta[cl]);
2236 	  }
2237       }
2238     while (mix && point > model_curr_point);
2239 
2240   if (print_p)
2241     fprintf (sched_dump, MODEL_BAR);
2242 }
2243 
2244 /* After DEP, which was cancelled, has been resolved for insn NEXT,
2245    check whether the insn's pattern needs restoring.  */
2246 static bool
2247 must_restore_pattern_p (rtx_insn *next, dep_t dep)
2248 {
2249   if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
2250     return false;
2251 
2252   if (DEP_TYPE (dep) == REG_DEP_CONTROL)
2253     {
2254       gcc_assert (ORIG_PAT (next) != NULL_RTX);
2255       gcc_assert (next == DEP_CON (dep));
2256     }
2257   else
2258     {
2259       struct dep_replacement *desc = DEP_REPLACE (dep);
2260       if (desc->insn != next)
2261 	{
2262 	  gcc_assert (*desc->loc == desc->orig);
2263 	  return false;
2264 	}
2265     }
2266   return true;
2267 }
2268 
2269 /* model_spill_cost (CL, P, P') returns the cost of increasing the
2270    pressure on CL from P to P'.  We use this to calculate a "base ECC",
2271    baseECC (CL, X), for each pressure class CL and each instruction X.
2272    Supposing X changes the pressure on CL from P to P', and that the
2273    maximum pressure on CL in the current model schedule is MP', then:
2274 
2275    * if X occurs before or at the next point of maximum pressure in
2276      the model schedule and P' > MP', then:
2277 
2278        baseECC (CL, X) = model_spill_cost (CL, MP, P')
2279 
2280      The idea is that the pressure after scheduling a fixed set of
2281      instructions -- in this case, the set up to and including the
2282      next maximum pressure point -- is going to be the same regardless
2283      of the order; we simply want to keep the intermediate pressure
2284      under control.  Thus X has a cost of zero unless scheduling it
2285      now would exceed MP'.
2286 
2287      If all increases in the set are by the same amount, no zero-cost
2288      instruction will ever cause the pressure to exceed MP'.  However,
2289      if X is instead moved past an instruction X' with pressure in the
2290      range (MP' - (P' - P), MP'), the pressure at X' will increase
2291      beyond MP'.  Since baseECC is very much a heuristic anyway,
2292      it doesn't seem worth the overhead of tracking cases like these.
2293 
2294      The cost of exceeding MP' is always based on the original maximum
2295      pressure MP.  This is so that going 2 registers over the original
2296      limit has the same cost regardless of whether it comes from two
2297      separate +1 deltas or from a single +2 delta.
2298 
2299    * if X occurs after the next point of maximum pressure in the model
2300      schedule and P' > P, then:
2301 
2302        baseECC (CL, X) = model_spill_cost (CL, MP, MP' + (P' - P))
2303 
2304      That is, if we move X forward across a point of maximum pressure,
2305      and if X increases the pressure by P' - P, then we conservatively
2306      assume that scheduling X next would increase the maximum pressure
2307      by P' - P.  Again, the cost of doing this is based on the original
2308      maximum pressure MP, for the same reason as above.
2309 
2310    * if P' < P, P > MP, and X occurs at or after the next point of
2311      maximum pressure, then:
2312 
2313        baseECC (CL, X) = -model_spill_cost (CL, MAX (MP, P'), P)
2314 
2315      That is, if we have already exceeded the original maximum pressure MP,
2316      and if X might reduce the maximum pressure again -- or at least push
2317      it further back, and thus allow more scheduling freedom -- it is given
2318      a negative cost to reflect the improvement.
2319 
2320    * otherwise,
2321 
2322        baseECC (CL, X) = 0
2323 
2324      In this case, X is not expected to affect the maximum pressure MP',
2325      so it has zero cost.
2326 
2327    We then create a combined value baseECC (X) that is the sum of
2328    baseECC (CL, X) for each pressure class CL.
2329 
2330    baseECC (X) could itself be used as the ECC value described above.
2331    However, this is often too conservative, in the sense that it
2332    tends to make high-priority instructions that increase pressure
2333    wait too long in cases where introducing a spill would be better.
2334    For this reason the final ECC is a priority-adjusted form of
2335    baseECC (X).  Specifically, we calculate:
2336 
2337      P (X) = INSN_PRIORITY (X) - insn_delay (X) - baseECC (X)
2338      baseP = MAX { P (X) | baseECC (X) <= 0 }
2339 
2340    Then:
2341 
2342      ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0)
2343 
2344    Thus an instruction's effect on pressure is ignored if it has a high
2345    enough priority relative to the ones that don't increase pressure.
2346    Negative values of baseECC (X) do not increase the priority of X
2347    itself, but they do make it harder for other instructions to
2348    increase the pressure further.
2349 
2350    This pressure cost is deliberately timid.  The intention has been
2351    to choose a heuristic that rarely interferes with the normal list
2352    scheduler in cases where that scheduler would produce good code.
2353    We simply want to curb some of its worst excesses.  */
2354 
2355 /* Return the cost of increasing the pressure in class CL from FROM to TO.
2356 
2357    Here we use the very simplistic cost model that every register above
2358    sched_class_regs_num[CL] has a spill cost of 1.  We could use other
2359    measures instead, such as one based on MEMORY_MOVE_COST.  However:
2360 
2361       (1) In order for an instruction to be scheduled, the higher cost
2362 	  would need to be justified in a single saving of that many stalls.
2363 	  This is overly pessimistic, because the benefit of spilling is
2364 	  often to avoid a sequence of several short stalls rather than
2365 	  a single long one.
2366 
2367       (2) The cost is still arbitrary.  Because we are not allocating
2368 	  registers during scheduling, we have no way of knowing for
2369 	  sure how many memory accesses will be required by each spill,
2370 	  where the spills will be placed within the block, or even
2371 	  which block(s) will contain the spills.
2372 
2373    So a higher cost than 1 is often too conservative in practice,
2374    forcing blocks to contain unnecessary stalls instead of spill code.
2375    The simple cost below seems to be the best compromise.  It reduces
2376    the interference with the normal list scheduler, which helps make
2377    it more suitable for a default-on option.  */
2378 
2379 static int
2380 model_spill_cost (int cl, int from, int to)
2381 {
2382   from = MAX (from, sched_class_regs_num[cl]);
2383   return MAX (to, from) - from;
2384 }
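
/* Illustration only (not part of this file's build): with a class limit of
   8 registers, only the registers that end up above the limit are counted,
   so going from pressure 6 to 11 costs 3 and any change that stays at or
   below 8 costs 0.  A minimal standalone check of the same arithmetic;
   toy_spill_cost is a hypothetical clone of model_spill_cost.  */
#if 0
#include <assert.h>

static int
toy_spill_cost (int class_regs, int from, int to)
{
  from = (from > class_regs ? from : class_regs);
  return (to > from ? to : from) - from;
}

int
main (void)
{
  assert (toy_spill_cost (8, 6, 11) == 3);	/* Only 9..11 are "excess".  */
  assert (toy_spill_cost (8, 6, 8) == 0);	/* Still within the limit.  */
  assert (toy_spill_cost (8, 10, 12) == 2);	/* Already over: pay the delta.  */
  assert (toy_spill_cost (8, 12, 10) == 0);	/* Decreases are never negative.  */
  return 0;
}
#endif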
2385 
2386 /* Return baseECC (ira_pressure_classes[PCI], POINT), given that
2387    P = curr_reg_pressure[ira_pressure_classes[PCI]] and that
2388    P' = P + DELTA.  */
2389 
2390 static int
2391 model_excess_group_cost (struct model_pressure_group *group,
2392 			 int point, int pci, int delta)
2393 {
2394   int pressure, cl;
2395 
2396   cl = ira_pressure_classes[pci];
2397   if (delta < 0 && point >= group->limits[pci].point)
2398     {
2399       pressure = MAX (group->limits[pci].orig_pressure,
2400 		      curr_reg_pressure[cl] + delta);
2401       return -model_spill_cost (cl, pressure, curr_reg_pressure[cl]);
2402     }
2403 
2404   if (delta > 0)
2405     {
2406       if (point > group->limits[pci].point)
2407 	pressure = group->limits[pci].pressure + delta;
2408       else
2409 	pressure = curr_reg_pressure[cl] + delta;
2410 
2411       if (pressure > group->limits[pci].pressure)
2412 	return model_spill_cost (cl, group->limits[pci].orig_pressure,
2413 				 pressure);
2414     }
2415 
2416   return 0;
2417 }
2418 
2419 /* Return baseECC (MODEL_INSN (INSN)).  Dump the costs to sched_dump
2420    if PRINT_P.  */
2421 
2422 static int
2423 model_excess_cost (rtx_insn *insn, bool print_p)
2424 {
2425   int point, pci, cl, cost, this_cost, delta;
2426   struct reg_pressure_data *insn_reg_pressure;
2427   int insn_death[N_REG_CLASSES];
2428 
2429   calculate_reg_deaths (insn, insn_death);
2430   point = model_index (insn);
2431   insn_reg_pressure = INSN_REG_PRESSURE (insn);
2432   cost = 0;
2433 
2434   if (print_p)
2435     fprintf (sched_dump, ";;\t\t| %3d %4d | %4d %+3d |", point,
2436 	     INSN_UID (insn), INSN_PRIORITY (insn), insn_delay (insn));
2437 
2438   /* Sum up the individual costs for each register class.  */
2439   for (pci = 0; pci < ira_pressure_classes_num; pci++)
2440     {
2441       cl = ira_pressure_classes[pci];
2442       delta = insn_reg_pressure[pci].set_increase - insn_death[cl];
2443       this_cost = model_excess_group_cost (&model_before_pressure,
2444 					   point, pci, delta);
2445       cost += this_cost;
2446       if (print_p)
2447 	fprintf (sched_dump, " %s:[%d base cost %d]",
2448 		 reg_class_names[cl], delta, this_cost);
2449     }
2450 
2451   if (print_p)
2452     fprintf (sched_dump, "\n");
2453 
2454   return cost;
2455 }
2456 
2457 /* Dump the next points of maximum pressure for GROUP.  */
2458 
2459 static void
2460 model_dump_pressure_points (struct model_pressure_group *group)
2461 {
2462   int pci, cl;
2463 
2464   fprintf (sched_dump, ";;\t\t|  pressure points");
2465   for (pci = 0; pci < ira_pressure_classes_num; pci++)
2466     {
2467       cl = ira_pressure_classes[pci];
2468       fprintf (sched_dump, " %s:[%d->%d at ", reg_class_names[cl],
2469 	       curr_reg_pressure[cl], group->limits[pci].pressure);
2470       if (group->limits[pci].point < model_num_insns)
2471 	fprintf (sched_dump, "%d:%d]", group->limits[pci].point,
2472 		 INSN_UID (MODEL_INSN (group->limits[pci].point)));
2473       else
2474 	fprintf (sched_dump, "end]");
2475     }
2476   fprintf (sched_dump, "\n");
2477 }
2478 
2479 /* Set INSN_REG_PRESSURE_EXCESS_COST_CHANGE for INSNS[0...COUNT-1].  */
2480 
2481 static void
2482 model_set_excess_costs (rtx_insn **insns, int count)
2483 {
2484   int i, cost, priority_base, priority;
2485   bool print_p;
2486 
2487   /* Record the baseECC value for each instruction in the model schedule,
2488      except that negative costs are converted to zero ones now rather than
2489      later.  Do not assign a cost to debug instructions, since they must
2490      not change code-generation decisions.  Experiments suggest we also
2491      get better results by not assigning a cost to instructions from
2492      a different block.
2493 
2494      Set PRIORITY_BASE to baseP in the block comment above.  This is the
2495      maximum priority of the "cheap" instructions, which should always
2496      include the next model instruction.  */
2497   priority_base = 0;
2498   print_p = false;
2499   for (i = 0; i < count; i++)
2500     if (INSN_MODEL_INDEX (insns[i]))
2501       {
2502 	if (sched_verbose >= 6 && !print_p)
2503 	  {
2504 	    fprintf (sched_dump, MODEL_BAR);
2505 	    fprintf (sched_dump, ";;\t\t| Pressure costs for ready queue\n");
2506 	    model_dump_pressure_points (&model_before_pressure);
2507 	    fprintf (sched_dump, MODEL_BAR);
2508 	    print_p = true;
2509 	  }
2510 	cost = model_excess_cost (insns[i], print_p);
2511 	if (cost <= 0)
2512 	  {
2513 	    priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]) - cost;
2514 	    priority_base = MAX (priority_base, priority);
2515 	    cost = 0;
2516 	  }
2517 	INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = cost;
2518       }
2519   if (print_p)
2520     fprintf (sched_dump, MODEL_BAR);
2521 
2522   /* Use MAX (baseECC, 0) and baseP to calculate ECC for each
2523      instruction.  */
2524   for (i = 0; i < count; i++)
2525     {
2526       cost = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]);
2527       priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]);
2528       if (cost > 0 && priority > priority_base)
2529 	{
2530 	  cost += priority_base - priority;
2531 	  INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = MAX (cost, 0);
2532 	}
2533     }
2534 }
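
/* Illustration only (not part of this file's build): a standalone replay of
   the baseP / ECC formulas quoted in the block comment further above,
   which model_set_excess_costs implements in two passes.  toy[] holds
   hypothetical (INSN_PRIORITY, insn_delay, baseECC) triples.  */
#if 0
#include <stdio.h>

struct toy_insn { int priority; int delay; int base_ecc; int ecc; };

int
main (void)
{
  struct toy_insn toy[3] = {
    { 10, 0, 0, 0 },	/* Cheap insn: defines baseP = 10 - 0 - 0 = 10.  */
    { 14, 0, 3, 0 },	/* High priority, increases pressure.  */
    { 11, 0, 3, 0 },	/* Lower priority, increases pressure.  */
  };
  int i, base_p = 0;

  /* Pass 1: baseP = MAX { P (X) | baseECC (X) <= 0 }.  */
  for (i = 0; i < 3; i++)
    if (toy[i].base_ecc <= 0)
      {
	int p = toy[i].priority - toy[i].delay - toy[i].base_ecc;
	if (p > base_p)
	  base_p = p;
      }

  /* Pass 2: ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0).  */
  for (i = 0; i < 3; i++)
    {
      int p = toy[i].priority - toy[i].delay - toy[i].base_ecc;
      int ecc = (base_p - p < toy[i].base_ecc ? base_p - p : toy[i].base_ecc);
      toy[i].ecc = ecc > 0 ? ecc : 0;
      printf ("insn %d: baseECC %d -> ECC %d\n", i, toy[i].base_ecc, toy[i].ecc);
    }
  /* Insn 1's priority is high enough that its pressure cost is waived
     (ECC 0); insn 2's cost is only reduced from 3 to 2.  */
  return 0;
}
#endif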
2535 
2536 
2537 /* Enum of rank_for_schedule heuristic decisions.  */
2538 enum rfs_decision {
2539   RFS_LIVE_RANGE_SHRINK1, RFS_LIVE_RANGE_SHRINK2,
2540   RFS_SCHED_GROUP, RFS_PRESSURE_DELAY, RFS_PRESSURE_TICK,
2541   RFS_FEEDS_BACKTRACK_INSN, RFS_PRIORITY, RFS_SPECULATION,
2542   RFS_SCHED_RANK, RFS_LAST_INSN, RFS_PRESSURE_INDEX,
2543   RFS_DEP_COUNT, RFS_TIE, RFS_FUSION, RFS_COST, RFS_N };
2544 
2545 /* Corresponding strings for print outs.  */
2546 static const char *rfs_str[RFS_N] = {
2547   "RFS_LIVE_RANGE_SHRINK1", "RFS_LIVE_RANGE_SHRINK2",
2548   "RFS_SCHED_GROUP", "RFS_PRESSURE_DELAY", "RFS_PRESSURE_TICK",
2549   "RFS_FEEDS_BACKTRACK_INSN", "RFS_PRIORITY", "RFS_SPECULATION",
2550   "RFS_SCHED_RANK", "RFS_LAST_INSN", "RFS_PRESSURE_INDEX",
2551   "RFS_DEP_COUNT", "RFS_TIE", "RFS_FUSION", "RFS_COST" };
2552 
2553 /* Statistical breakdown of rank_for_schedule decisions.  */
2554 struct rank_for_schedule_stats_t { unsigned stats[RFS_N]; };
2555 static rank_for_schedule_stats_t rank_for_schedule_stats;
2556 
2557 /* Return the result of comparing insns TMP and TMP2 and update
2558    Rank_For_Schedule statistics.  */
2559 static int
2560 rfs_result (enum rfs_decision decision, int result, rtx tmp, rtx tmp2)
2561 {
2562   ++rank_for_schedule_stats.stats[decision];
2563   if (result < 0)
2564     INSN_LAST_RFS_WIN (tmp) = decision;
2565   else if (result > 0)
2566     INSN_LAST_RFS_WIN (tmp2) = decision;
2567   else
2568     gcc_unreachable ();
2569   return result;
2570 }
2571 
2572 /* Sorting predicate to move DEBUG_INSNs to the top of the ready list, while
2573    keeping normal insns in original order.  */
2574 
2575 static int
2576 rank_for_schedule_debug (const void *x, const void *y)
2577 {
2578   rtx_insn *tmp = *(rtx_insn * const *) y;
2579   rtx_insn *tmp2 = *(rtx_insn * const *) x;
2580 
2581   /* Schedule debug insns as early as possible.  */
2582   if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
2583     return -1;
2584   else if (!DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2585     return 1;
2586   else if (DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2587     return INSN_LUID (tmp) - INSN_LUID (tmp2);
2588   else
2589     return INSN_RFS_DEBUG_ORIG_ORDER (tmp2) - INSN_RFS_DEBUG_ORIG_ORDER (tmp);
2590 }
2591 
2592 /* Returns a positive value if x is preferred; returns a negative value if
2593    y is preferred.  Should never return 0, since that will make the sort
2594    unstable.  */
2595 
2596 static int
2597 rank_for_schedule (const void *x, const void *y)
2598 {
2599   rtx_insn *tmp = *(rtx_insn * const *) y;
2600   rtx_insn *tmp2 = *(rtx_insn * const *) x;
2601   int tmp_class, tmp2_class;
2602   int val, priority_val, info_val, diff;
2603 
2604   if (live_range_shrinkage_p)
2605     {
2606       /* Don't use SCHED_PRESSURE_MODEL -- it results in much worse
2607 	 code.  */
2608       gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
2609       if ((INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp) < 0
2610 	   || INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2) < 0)
2611 	  && (diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
2612 		      - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2))) != 0)
2613 	return rfs_result (RFS_LIVE_RANGE_SHRINK1, diff, tmp, tmp2);
2614       /* Sort by INSN_LUID (original insn order), so that we make the
2615 	 sort stable.  This minimizes instruction movement, thus
2616 	 minimizing sched's effect on debugging and cross-jumping.  */
2617       return rfs_result (RFS_LIVE_RANGE_SHRINK2,
2618 			 INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2619     }
2620 
2621   /* The insn in a schedule group should be issued first.  */
2622   if (flag_sched_group_heuristic &&
2623       SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
2624     return rfs_result (RFS_SCHED_GROUP, SCHED_GROUP_P (tmp2) ? 1 : -1,
2625 		       tmp, tmp2);
2626 
2627   /* Make sure that priority of TMP and TMP2 are initialized.  */
2628   gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
2629 
2630   if (sched_fusion)
2631     {
2632       /* The instruction that has the same fusion priority as the last
2633 	 scheduled instruction is the one we want to pick next.  If neither
2634 	 insn matches, we sort the ready list first by fusion priority, then
2635 	 by priority, and finally by INSN_LUID.  */
2636       int a = INSN_FUSION_PRIORITY (tmp);
2637       int b = INSN_FUSION_PRIORITY (tmp2);
2638       int last = -1;
2639 
2640       if (last_nondebug_scheduled_insn
2641 	  && !NOTE_P (last_nondebug_scheduled_insn)
2642 	  && BLOCK_FOR_INSN (tmp)
2643 	       == BLOCK_FOR_INSN (last_nondebug_scheduled_insn))
2644 	last = INSN_FUSION_PRIORITY (last_nondebug_scheduled_insn);
2645 
2646       if (a != last && b != last)
2647 	{
2648 	  if (a == b)
2649 	    {
2650 	      a = INSN_PRIORITY (tmp);
2651 	      b = INSN_PRIORITY (tmp2);
2652 	    }
2653 	  if (a != b)
2654 	    return rfs_result (RFS_FUSION, b - a, tmp, tmp2);
2655 	  else
2656 	    return rfs_result (RFS_FUSION,
2657 			       INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2658 	}
2659       else if (a == b)
2660 	{
2661 	  gcc_assert (last_nondebug_scheduled_insn
2662 		      && !NOTE_P (last_nondebug_scheduled_insn));
2663 	  last = INSN_PRIORITY (last_nondebug_scheduled_insn);
2664 
2665 	  a = abs (INSN_PRIORITY (tmp) - last);
2666 	  b = abs (INSN_PRIORITY (tmp2) - last);
2667 	  if (a != b)
2668 	    return rfs_result (RFS_FUSION, a - b, tmp, tmp2);
2669 	  else
2670 	    return rfs_result (RFS_FUSION,
2671 			       INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2672 	}
2673       else if (a == last)
2674 	return rfs_result (RFS_FUSION, -1, tmp, tmp2);
2675       else
2676 	return rfs_result (RFS_FUSION, 1, tmp, tmp2);
2677     }
2678 
2679   if (sched_pressure != SCHED_PRESSURE_NONE)
2680     {
2681       /* Prefer insn whose scheduling results in the smallest register
2682 	 pressure excess.  */
2683       if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
2684 		   + insn_delay (tmp)
2685 		   - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
2686 		   - insn_delay (tmp2))))
2687 	return rfs_result (RFS_PRESSURE_DELAY, diff, tmp, tmp2);
2688     }
2689 
2690   if (sched_pressure != SCHED_PRESSURE_NONE
2691       && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var)
2692       && INSN_TICK (tmp2) != INSN_TICK (tmp))
2693     {
2694       diff = INSN_TICK (tmp) - INSN_TICK (tmp2);
2695       return rfs_result (RFS_PRESSURE_TICK, diff, tmp, tmp2);
2696     }
2697 
2698   /* If we are doing backtracking in this schedule, prefer insns that
2699      have forward dependencies with negative cost against an insn that
2700      was already scheduled.  */
2701   if (current_sched_info->flags & DO_BACKTRACKING)
2702     {
2703       priority_val = FEEDS_BACKTRACK_INSN (tmp2) - FEEDS_BACKTRACK_INSN (tmp);
2704       if (priority_val)
2705 	return rfs_result (RFS_FEEDS_BACKTRACK_INSN, priority_val, tmp, tmp2);
2706     }
2707 
2708   /* Prefer insn with higher priority.  */
2709   priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
2710 
2711   if (flag_sched_critical_path_heuristic && priority_val)
2712     return rfs_result (RFS_PRIORITY, priority_val, tmp, tmp2);
2713 
2714   if (param_sched_autopref_queue_depth >= 0)
2715     {
2716       int autopref = autopref_rank_for_schedule (tmp, tmp2);
2717       if (autopref != 0)
2718 	return autopref;
2719     }
2720 
2721   /* Prefer speculative insn with greater dependencies weakness.  */
2722   if (flag_sched_spec_insn_heuristic && spec_info)
2723     {
2724       ds_t ds1, ds2;
2725       dw_t dw1, dw2;
2726       int dw;
2727 
2728       ds1 = TODO_SPEC (tmp) & SPECULATIVE;
2729       if (ds1)
2730 	dw1 = ds_weak (ds1);
2731       else
2732 	dw1 = NO_DEP_WEAK;
2733 
2734       ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
2735       if (ds2)
2736 	dw2 = ds_weak (ds2);
2737       else
2738 	dw2 = NO_DEP_WEAK;
2739 
2740       dw = dw2 - dw1;
2741       if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
2742 	return rfs_result (RFS_SPECULATION, dw, tmp, tmp2);
2743     }
2744 
2745   info_val = (*current_sched_info->rank) (tmp, tmp2);
2746   if (flag_sched_rank_heuristic && info_val)
2747     return rfs_result (RFS_SCHED_RANK, info_val, tmp, tmp2);
2748 
2749   /* Compare insns based on their relation to the last scheduled
2750      non-debug insn.  */
2751   if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
2752     {
2753       dep_t dep1;
2754       dep_t dep2;
2755       rtx_insn *last = last_nondebug_scheduled_insn;
2756 
2757       /* Classify the instructions into three classes:
2758          1) Data dependent on last scheduled insn.
2759          2) Anti/Output dependent on last scheduled insn.
2760          3) Independent of last scheduled insn, or has latency of one.
2761          Choose the insn from the highest numbered class if different.  */
2762       dep1 = sd_find_dep_between (last, tmp, true);
2763 
2764       if (dep1 == NULL || dep_cost (dep1) == 1)
2765 	tmp_class = 3;
2766       else if (/* Data dependence.  */
2767 	       DEP_TYPE (dep1) == REG_DEP_TRUE)
2768 	tmp_class = 1;
2769       else
2770 	tmp_class = 2;
2771 
2772       dep2 = sd_find_dep_between (last, tmp2, true);
2773 
2774       if (dep2 == NULL || dep_cost (dep2)  == 1)
2775 	tmp2_class = 3;
2776       else if (/* Data dependence.  */
2777 	       DEP_TYPE (dep2) == REG_DEP_TRUE)
2778 	tmp2_class = 1;
2779       else
2780 	tmp2_class = 2;
2781 
2782       if ((val = tmp2_class - tmp_class))
2783 	return rfs_result (RFS_LAST_INSN, val, tmp, tmp2);
2784     }
2785 
2786   /* Prefer instructions that occur earlier in the model schedule.  */
2787   if (sched_pressure == SCHED_PRESSURE_MODEL)
2788     {
2789       diff = model_index (tmp) - model_index (tmp2);
2790       if (diff != 0)
2791 	return rfs_result (RFS_PRESSURE_INDEX, diff, tmp, tmp2);
2792     }
2793 
2794   /* Prefer the insn which has more later insns that depend on it.
2795      This gives the scheduler more freedom when scheduling later
2796      instructions at the expense of added register pressure.  */
2797 
2798   val = (dep_list_size (tmp2, SD_LIST_FORW)
2799 	 - dep_list_size (tmp, SD_LIST_FORW));
2800 
2801   if (flag_sched_dep_count_heuristic && val != 0)
2802     return rfs_result (RFS_DEP_COUNT, val, tmp, tmp2);
2803 
2804   /* Sort by INSN_COST rather than INSN_LUID.  This means that instructions
2805      which take longer to execute are prioritised, which leads to more
2806      dual-issue opportunities on in-order cores that have this feature.  */
2807 
2808   if (INSN_COST (tmp) != INSN_COST (tmp2))
2809     return rfs_result (RFS_COST, INSN_COST (tmp2) - INSN_COST (tmp),
2810 		       tmp, tmp2);
2811 
2812   /* If insns are equally good, sort by INSN_LUID (original insn order),
2813      so that we make the sort stable.  This minimizes instruction movement,
2814      thus minimizing sched's effect on debugging and cross-jumping.  */
2815   return rfs_result (RFS_TIE, INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2816 }
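
/* Illustration only (not part of this file's build): rank_for_schedule is a
   chain of tie-breakers that ends with a comparison of unique LUIDs, so the
   comparator never returns 0 and the overall order stays deterministic.  A
   minimal standalone sketch of that pattern with hypothetical keys; the
   sign conventions are simplified relative to the real comparator.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct toy_insn { int priority; int model_index; int luid; };

static int
toy_rank (const void *xp, const void *yp)
{
  const struct toy_insn *x = (const struct toy_insn *) xp;
  const struct toy_insn *y = (const struct toy_insn *) yp;
  int diff;

  if ((diff = y->priority - x->priority) != 0)	/* Higher priority first.  */
    return diff;
  if ((diff = x->model_index - y->model_index) != 0) /* Earlier model insn.  */
    return diff;
  return x->luid - y->luid;	/* Unique key: never return 0.  */
}

int
main (void)
{
  struct toy_insn v[3] = { { 5, 2, 11 }, { 7, 0, 12 }, { 5, 1, 13 } };
  int i;
  qsort (v, 3, sizeof v[0], toy_rank);
  for (i = 0; i < 3; i++)
    printf ("luid %d\n", v[i].luid);	/* Prints 12, 13, 11.  */
  return 0;
}
#endif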
2817 
2818 /* Resort the array A in which only the element at index N may be out of order.  */
2819 
2820 HAIFA_INLINE static void
2821 swap_sort (rtx_insn **a, int n)
2822 {
2823   rtx_insn *insn = a[n - 1];
2824   int i = n - 2;
2825 
2826   while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
2827     {
2828       a[i + 1] = a[i];
2829       i -= 1;
2830     }
2831   a[i + 1] = insn;
2832 }
2833 
2834 /* Add INSN to the insn queue so that it can be executed at least
2835    N_CYCLES after the currently executing insn.  Preserve insns
2836    chain for debugging purposes.  REASON will be printed in debugging
2837    output.  */
2838 
2839 HAIFA_INLINE static void
2840 queue_insn (rtx_insn *insn, int n_cycles, const char *reason)
2841 {
2842   int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
2843   rtx_insn_list *link = alloc_INSN_LIST (insn, insn_queue[next_q]);
2844   int new_tick;
2845 
2846   gcc_assert (n_cycles <= max_insn_queue_index);
2847   gcc_assert (!DEBUG_INSN_P (insn));
2848 
2849   insn_queue[next_q] = link;
2850   q_size += 1;
2851 
2852   if (sched_verbose >= 2)
2853     {
2854       fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
2855 	       (*current_sched_info->print_insn) (insn, 0));
2856 
2857       fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
2858     }
2859 
2860   QUEUE_INDEX (insn) = next_q;
2861 
2862   if (current_sched_info->flags & DO_BACKTRACKING)
2863     {
2864       new_tick = clock_var + n_cycles;
2865       if (INSN_TICK (insn) == INVALID_TICK || INSN_TICK (insn) < new_tick)
2866 	INSN_TICK (insn) = new_tick;
2867 
2868       if (INSN_EXACT_TICK (insn) != INVALID_TICK
2869 	  && INSN_EXACT_TICK (insn) < clock_var + n_cycles)
2870 	{
2871 	  must_backtrack = true;
2872 	  if (sched_verbose >= 2)
2873 	    fprintf (sched_dump, ";;\t\tcausing a backtrack.\n");
2874 	}
2875     }
2876 }
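
/* Illustration only (not part of this file's build): the insn queue above is
   a ring of per-cycle buckets; queuing an insn for N_CYCLES drops it into
   bucket (q_ptr + N_CYCLES) modulo the ring size, and advancing the clock
   empties one bucket into the ready list.  A minimal standalone sketch with
   made-up sizes and plain int "insns" instead of rtx_insn_list chains.  */
#if 0
#include <stdio.h>

#define RING_SIZE 8		/* Power of two, exceeding the longest stall.  */
#define BUCKET_MAX 4

static int bucket[RING_SIZE][BUCKET_MAX];
static int bucket_len[RING_SIZE];
static int q_ptr;

static void
toy_queue_insn (int insn, int n_cycles)
{
  int slot = (q_ptr + n_cycles) & (RING_SIZE - 1);
  bucket[slot][bucket_len[slot]++] = insn;
}

/* Advance the clock by one cycle and print what becomes ready.  */
static void
toy_advance_clock (void)
{
  int i;
  q_ptr = (q_ptr + 1) & (RING_SIZE - 1);
  for (i = 0; i < bucket_len[q_ptr]; i++)
    printf ("  insn %d is now ready\n", bucket[q_ptr][i]);
  bucket_len[q_ptr] = 0;
}

int
main (void)
{
  int cycle;
  toy_queue_insn (101, 2);	/* Ready two cycles from now.  */
  toy_queue_insn (102, 1);	/* Ready next cycle.  */
  for (cycle = 1; cycle <= 2; cycle++)
    {
      printf ("cycle %d:\n", cycle);
      toy_advance_clock ();
    }
  return 0;
}
#endif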
2877 
2878 /* Remove INSN from queue.  */
2879 static void
2880 queue_remove (rtx_insn *insn)
2881 {
2882   gcc_assert (QUEUE_INDEX (insn) >= 0);
2883   remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
2884   q_size--;
2885   QUEUE_INDEX (insn) = QUEUE_NOWHERE;
2886 }
2887 
2888 /* Return a pointer to the bottom of the ready list, i.e. the insn
2889    with the lowest priority.  */
2890 
2891 rtx_insn **
2892 ready_lastpos (struct ready_list *ready)
2893 {
2894   gcc_assert (ready->n_ready >= 1);
2895   return ready->vec + ready->first - ready->n_ready + 1;
2896 }
2897 
2898 /* Add an element INSN to the ready list so that it ends up with the
2899    lowest/highest priority depending on FIRST_P.  */
2900 
2901 HAIFA_INLINE static void
2902 ready_add (struct ready_list *ready, rtx_insn *insn, bool first_p)
2903 {
2904   if (!first_p)
2905     {
2906       if (ready->first == ready->n_ready)
2907 	{
2908 	  memmove (ready->vec + ready->veclen - ready->n_ready,
2909 		   ready_lastpos (ready),
2910 		   ready->n_ready * sizeof (rtx));
2911 	  ready->first = ready->veclen - 1;
2912 	}
2913       ready->vec[ready->first - ready->n_ready] = insn;
2914     }
2915   else
2916     {
2917       if (ready->first == ready->veclen - 1)
2918 	{
2919 	  if (ready->n_ready)
2920 	    /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
2921 	    memmove (ready->vec + ready->veclen - ready->n_ready - 1,
2922 		     ready_lastpos (ready),
2923 		     ready->n_ready * sizeof (rtx));
2924 	  ready->first = ready->veclen - 2;
2925 	}
2926       ready->vec[++(ready->first)] = insn;
2927     }
2928 
2929   ready->n_ready++;
2930   if (DEBUG_INSN_P (insn))
2931     ready->n_debug++;
2932 
2933   gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
2934   QUEUE_INDEX (insn) = QUEUE_READY;
2935 
2936   if (INSN_EXACT_TICK (insn) != INVALID_TICK
2937       && INSN_EXACT_TICK (insn) < clock_var)
2938     {
2939       must_backtrack = true;
2940     }
2941 }
2942 
2943 /* Remove the element with the highest priority from the ready list and
2944    return it.  */
2945 
2946 HAIFA_INLINE static rtx_insn *
2947 ready_remove_first (struct ready_list *ready)
2948 {
2949   rtx_insn *t;
2950 
2951   gcc_assert (ready->n_ready);
2952   t = ready->vec[ready->first--];
2953   ready->n_ready--;
2954   if (DEBUG_INSN_P (t))
2955     ready->n_debug--;
2956   /* If the queue becomes empty, reset it.  */
2957   if (ready->n_ready == 0)
2958     ready->first = ready->veclen - 1;
2959 
2960   gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
2961   QUEUE_INDEX (t) = QUEUE_NOWHERE;
2962 
2963   return t;
2964 }
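
/* Illustration only (not part of this file's build): the ready list is a
   vector in which the highest-priority insn sits at index FIRST and the
   list grows downwards, so removing the first element is just vec[first--]
   and adding a lowest-priority element writes to vec[first - n_ready].  A
   minimal standalone sketch with plain ints and a hypothetical fixed-size
   vector; the memmove that recentres the window when it hits the array
   edge is left out.  */
#if 0
#include <stdio.h>

#define VECLEN 16

static int vec[VECLEN];
static int first = VECLEN - 1;
static int n_ready;

/* Append INSN below the current bottom of the list; the first insn added
   becomes the current highest-priority element.  */
static void
toy_add_lowest (int insn)
{
  vec[first - n_ready] = insn;
  n_ready++;
}

static int
toy_remove_first (void)
{
  int t = vec[first--];		/* Highest priority is at FIRST.  */
  n_ready--;
  if (n_ready == 0)
    first = VECLEN - 1;		/* Reset when the list empties.  */
  return t;
}

int
main (void)
{
  toy_add_lowest (3);
  toy_add_lowest (2);
  toy_add_lowest (1);
  while (n_ready > 0)
    printf ("%d ", toy_remove_first ());	/* Prints "3 2 1".  */
  printf ("\n");
  return 0;
}
#endif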
2965 
2966 /* The following code implements multi-pass scheduling for the first
2967    cycle.  In other words, we try to choose the ready insn that
2968    permits starting the maximum number of insns on the same cycle.  */
2969 
2970 /* Return a pointer to the element INDEX from the ready list.  INDEX for
2971    insn with the highest priority is 0, and the lowest priority has
2972    N_READY - 1.  */
2973 
2974 rtx_insn *
2975 ready_element (struct ready_list *ready, int index)
2976 {
2977   gcc_assert (ready->n_ready && index < ready->n_ready);
2978 
2979   return ready->vec[ready->first - index];
2980 }
2981 
2982 /* Remove the element INDEX from the ready list and return it.  INDEX
2983    for insn with the highest priority is 0, and the lowest priority
2984    has N_READY - 1.  */
2985 
2986 HAIFA_INLINE static rtx_insn *
2987 ready_remove (struct ready_list *ready, int index)
2988 {
2989   rtx_insn *t;
2990   int i;
2991 
2992   if (index == 0)
2993     return ready_remove_first (ready);
2994   gcc_assert (ready->n_ready && index < ready->n_ready);
2995   t = ready->vec[ready->first - index];
2996   ready->n_ready--;
2997   if (DEBUG_INSN_P (t))
2998     ready->n_debug--;
2999   for (i = index; i < ready->n_ready; i++)
3000     ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
3001   QUEUE_INDEX (t) = QUEUE_NOWHERE;
3002   return t;
3003 }
3004 
3005 /* Remove INSN from the ready list.  */
3006 static void
3007 ready_remove_insn (rtx_insn *insn)
3008 {
3009   int i;
3010 
3011   for (i = 0; i < readyp->n_ready; i++)
3012     if (ready_element (readyp, i) == insn)
3013       {
3014         ready_remove (readyp, i);
3015         return;
3016       }
3017   gcc_unreachable ();
3018 }
3019 
3020 /* Calculate the difference of the two statistics sets WAS and NOW.
3021    The result is returned in WAS.  */
3022 static void
3023 rank_for_schedule_stats_diff (rank_for_schedule_stats_t *was,
3024 			      const rank_for_schedule_stats_t *now)
3025 {
3026   for (int i = 0; i < RFS_N; ++i)
3027     was->stats[i] = now->stats[i] - was->stats[i];
3028 }
3029 
3030 /* Print rank_for_schedule statistics.  */
3031 static void
3032 print_rank_for_schedule_stats (const char *prefix,
3033 			       const rank_for_schedule_stats_t *stats,
3034 			       struct ready_list *ready)
3035 {
3036   for (int i = 0; i < RFS_N; ++i)
3037     if (stats->stats[i])
3038       {
3039 	fprintf (sched_dump, "%s%20s: %u", prefix, rfs_str[i], stats->stats[i]);
3040 
3041 	if (ready != NULL)
3042 	  /* Print out insns that won due to RFS_<I>.  */
3043 	  {
3044 	    rtx_insn **p = ready_lastpos (ready);
3045 
3046 	    fprintf (sched_dump, ":");
3047 	    /* Start with 1 since least-priority insn didn't have any wins.  */
3048 	    for (int j = 1; j < ready->n_ready; ++j)
3049 	      if (INSN_LAST_RFS_WIN (p[j]) == i)
3050 		fprintf (sched_dump, " %s",
3051 			 (*current_sched_info->print_insn) (p[j], 0));
3052 	  }
3053 	fprintf (sched_dump, "\n");
3054       }
3055 }
3056 
3057 /* Separate DEBUG_INSNs from normal insns.  DEBUG_INSNs go to the end
3058    of the array.  */
3059 static void
3060 ready_sort_debug (struct ready_list *ready)
3061 {
3062   int i;
3063   rtx_insn **first = ready_lastpos (ready);
3064 
3065   for (i = 0; i < ready->n_ready; ++i)
3066     if (!DEBUG_INSN_P (first[i]))
3067       INSN_RFS_DEBUG_ORIG_ORDER (first[i]) = i;
3068 
3069   qsort (first, ready->n_ready, sizeof (rtx), rank_for_schedule_debug);
3070 }
3071 
3072 /* Sort non-debug insns in the ready list READY by ascending priority.
3073    Assumes that all debug insns are separated from the real insns.  */
3074 static void
3075 ready_sort_real (struct ready_list *ready)
3076 {
3077   int i;
3078   rtx_insn **first = ready_lastpos (ready);
3079   int n_ready_real = ready->n_ready - ready->n_debug;
3080 
3081   if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
3082     for (i = 0; i < n_ready_real; ++i)
3083       setup_insn_reg_pressure_info (first[i]);
3084   else if (sched_pressure == SCHED_PRESSURE_MODEL
3085 	   && model_curr_point < model_num_insns)
3086     model_set_excess_costs (first, n_ready_real);
3087 
3088   rank_for_schedule_stats_t stats1;
3089   if (sched_verbose >= 4)
3090     stats1 = rank_for_schedule_stats;
3091 
3092   if (n_ready_real == 2)
3093     swap_sort (first, n_ready_real);
3094   else if (n_ready_real > 2)
3095     qsort (first, n_ready_real, sizeof (rtx), rank_for_schedule);
3096 
3097   if (sched_verbose >= 4)
3098     {
3099       rank_for_schedule_stats_diff (&stats1, &rank_for_schedule_stats);
3100       print_rank_for_schedule_stats (";;\t\t", &stats1, ready);
3101     }
3102 }
3103 
3104 /* Sort the ready list READY by ascending priority.  */
3105 static void
3106 ready_sort (struct ready_list *ready)
3107 {
3108   if (ready->n_debug > 0)
3109     ready_sort_debug (ready);
3110   else
3111     ready_sort_real (ready);
3112 }
3113 
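/* A minimal sketch of the sorting contract, with a hypothetical comparator
   standing in for the real rank_for_schedule: the sort starts from the
   lowest-priority end of the vector (ready_lastpos) and must leave the
   highest-priority insn at ready->vec[ready->first], i.e. at the end of
   the sorted region.

     static int
     cmp_by_priority (const void *a, const void *b)
     {
       rtx_insn *x = *(rtx_insn *const *) a;
       rtx_insn *y = *(rtx_insn *const *) b;
       return INSN_PRIORITY (x) - INSN_PRIORITY (y);   // ascending
     }

     qsort (ready_lastpos (ready), ready->n_ready, sizeof (rtx),
	    cmp_by_priority);

   The real comparator additionally breaks ties on register pressure,
   speculation status and several other criteria.  */
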
3114 /* PREV is an insn that is ready to execute.  Adjust its priority if that
3115    will help shorten or lengthen register lifetimes as appropriate.  Also
3116    provide a hook for the target to tweak itself.  */
3117 
3118 HAIFA_INLINE static void
3119 adjust_priority (rtx_insn *prev)
3120 {
3121   /* ??? There used to be code here to try and estimate how an insn
3122      affected register lifetimes, but it did it by looking at REG_DEAD
3123      notes, which we removed in schedule_region.  Nor did it try to
3124      take into account register pressure or anything useful like that.
3125 
3126      Revisit when we have a machine model to work with and not before.  */
3127 
3128   if (targetm.sched.adjust_priority)
3129     INSN_PRIORITY (prev) =
3130       targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
3131 }
3132 
3133 /* Advance DFA state STATE on one cycle.  */
3134 void
3135 advance_state (state_t state)
3136 {
3137   if (targetm.sched.dfa_pre_advance_cycle)
3138     targetm.sched.dfa_pre_advance_cycle ();
3139 
3140   if (targetm.sched.dfa_pre_cycle_insn)
3141     state_transition (state,
3142 		      targetm.sched.dfa_pre_cycle_insn ());
3143 
3144   state_transition (state, NULL);
3145 
3146   if (targetm.sched.dfa_post_cycle_insn)
3147     state_transition (state,
3148 		      targetm.sched.dfa_post_cycle_insn ());
3149 
3150   if (targetm.sched.dfa_post_advance_cycle)
3151     targetm.sched.dfa_post_advance_cycle ();
3152 }
3153 
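/* Usage sketch (hypothetical helper, not part of this file): accounting
   for N stall cycles amounts to advancing the DFA state N times.

     static void
     skip_cycles (state_t state, int n)
     {
       for (int i = 0; i < n; i++)
	 advance_state (state);
     }

   The state_transition call with a NULL insn is what advances the
   automaton by one cycle; the dfa_pre/post_cycle_insn hooks merely let a
   target feed artificial instructions to the automaton around that
   boundary.  */
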
3154 /* Advance time on one cycle.  */
3155 HAIFA_INLINE static void
3156 advance_one_cycle (void)
3157 {
3158   advance_state (curr_state);
3159   if (sched_verbose >= 4)
3160     fprintf (sched_dump, ";;\tAdvance the current state.\n");
3161 }
3162 
3163 /* Update register pressure after scheduling INSN.  */
3164 static void
3165 update_register_pressure (rtx_insn *insn)
3166 {
3167   struct reg_use_data *use;
3168   struct reg_set_data *set;
3169 
3170   gcc_checking_assert (!DEBUG_INSN_P (insn));
3171 
3172   for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
3173     if (dying_use_p (use))
3174       mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
3175 				 use->regno, false);
3176   for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
3177     mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
3178 			       set->regno, true);
3179 }
3180 
3181 /* Set up or update (if UPDATE_P) max register pressure (see its
3182    meaning in sched-int.h::_haifa_insn_data) for all current BB insns
3183    after insn AFTER.  */
3184 static void
3185 setup_insn_max_reg_pressure (rtx_insn *after, bool update_p)
3186 {
3187   int i, p;
3188   bool eq_p;
3189   rtx_insn *insn;
3190   static int max_reg_pressure[N_REG_CLASSES];
3191 
3192   save_reg_pressure ();
3193   for (i = 0; i < ira_pressure_classes_num; i++)
3194     max_reg_pressure[ira_pressure_classes[i]]
3195       = curr_reg_pressure[ira_pressure_classes[i]];
3196   for (insn = NEXT_INSN (after);
3197        insn != NULL_RTX && ! BARRIER_P (insn)
3198 	 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
3199        insn = NEXT_INSN (insn))
3200     if (NONDEBUG_INSN_P (insn))
3201       {
3202 	eq_p = true;
3203 	for (i = 0; i < ira_pressure_classes_num; i++)
3204 	  {
3205 	    p = max_reg_pressure[ira_pressure_classes[i]];
3206 	    if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
3207 	      {
3208 		eq_p = false;
3209 		INSN_MAX_REG_PRESSURE (insn)[i]
3210 		  = max_reg_pressure[ira_pressure_classes[i]];
3211 	      }
3212 	  }
3213 	if (update_p && eq_p)
3214 	  break;
3215 	update_register_pressure (insn);
3216 	for (i = 0; i < ira_pressure_classes_num; i++)
3217 	  if (max_reg_pressure[ira_pressure_classes[i]]
3218 	      < curr_reg_pressure[ira_pressure_classes[i]])
3219 	    max_reg_pressure[ira_pressure_classes[i]]
3220 	      = curr_reg_pressure[ira_pressure_classes[i]];
3221       }
3222   restore_reg_pressure ();
3223 }
3224 
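/* Worked example with hypothetical numbers and a single pressure class:
   if the pressure just before each of the remaining insns A, B, C, D is
   3, 5, 4, 2, the forward scan above records the running maximum, so
   their INSN_MAX_REG_PRESSURE entries become 3, 5, 5, 5.  With UPDATE_P
   set, the scan stops as soon as an insn's recorded values already equal
   the running maxima, on the assumption that the entries further down
   are then still up to date.  */
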
3225 /* Update the current register pressure after scheduling INSN.  Also
3226    update the max register pressure for unscheduled insns of the
3227    current BB.  */
3228 static void
3229 update_reg_and_insn_max_reg_pressure (rtx_insn *insn)
3230 {
3231   int i;
3232   int before[N_REG_CLASSES];
3233 
3234   for (i = 0; i < ira_pressure_classes_num; i++)
3235     before[i] = curr_reg_pressure[ira_pressure_classes[i]];
3236   update_register_pressure (insn);
3237   for (i = 0; i < ira_pressure_classes_num; i++)
3238     if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
3239       break;
3240   if (i < ira_pressure_classes_num)
3241     setup_insn_max_reg_pressure (insn, true);
3242 }
3243 
3244 /* Set up register pressure at the beginning of basic block BB, whose
3245    insns start after insn AFTER.  Also set up the max register pressure
3246    for all insns of the basic block.  */
3247 void
3248 sched_setup_bb_reg_pressure_info (basic_block bb, rtx_insn *after)
3249 {
3250   gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
3251   initiate_bb_reg_pressure_info (bb);
3252   setup_insn_max_reg_pressure (after, false);
3253 }
3254 
3255 /* If doing predication while scheduling, verify whether INSN, which
3256    has just been scheduled, clobbers the conditions of any
3257    instructions that must be predicated in order to break their
3258    dependencies.  If so, remove them from the queues so that they will
3259    only be scheduled once their control dependency is resolved.  */
3260 
3261 static void
3262 check_clobbered_conditions (rtx_insn *insn)
3263 {
3264   HARD_REG_SET t;
3265   int i;
3266 
3267   if ((current_sched_info->flags & DO_PREDICATION) == 0)
3268     return;
3269 
3270   find_all_hard_reg_sets (insn, &t, true);
3271 
3272  restart:
3273   for (i = 0; i < ready.n_ready; i++)
3274     {
3275       rtx_insn *x = ready_element (&ready, i);
3276       if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
3277 	{
3278 	  ready_remove_insn (x);
3279 	  goto restart;
3280 	}
3281     }
3282   for (i = 0; i <= max_insn_queue_index; i++)
3283     {
3284       rtx_insn_list *link;
3285       int q = NEXT_Q_AFTER (q_ptr, i);
3286 
3287     restart_queue:
3288       for (link = insn_queue[q]; link; link = link->next ())
3289 	{
3290 	  rtx_insn *x = link->insn ();
3291 	  if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
3292 	    {
3293 	      queue_remove (x);
3294 	      goto restart_queue;
3295 	    }
3296 	}
3297     }
3298 }
3299 
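/* Hypothetical example: if insn X had its control dependence broken by
   predication (TODO_SPEC (X) == DEP_CONTROL) and would be predicated on a
   condition held in a register that INSN has just clobbered, the
   predicated form of X is no longer valid; the loops above therefore pull
   X back out of the ready list or queue until its control dependence is
   resolved normally.  */
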
3300 /* Return (in order):
3301 
3302    - positive if INSN adversely affects the pressure on one
3303      register class
3304 
3305    - negative if INSN reduces the pressure on one register class
3306 
3307    - 0 if INSN doesn't affect the pressure on any register class.  */
3308 
3309 static int
3310 model_classify_pressure (struct model_insn_info *insn)
3311 {
3312   struct reg_pressure_data *reg_pressure;
3313   int death[N_REG_CLASSES];
3314   int pci, cl, sum;
3315 
3316   calculate_reg_deaths (insn->insn, death);
3317   reg_pressure = INSN_REG_PRESSURE (insn->insn);
3318   sum = 0;
3319   for (pci = 0; pci < ira_pressure_classes_num; pci++)
3320     {
3321       cl = ira_pressure_classes[pci];
3322       if (death[cl] < reg_pressure[pci].set_increase)
3323 	return 1;
3324       sum += reg_pressure[pci].set_increase - death[cl];
3325     }
3326   return sum;
3327 }
3328 
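/* Worked example with hypothetical numbers: suppose INSN sets two
   GENERAL_REGS values (set_increase 2) while three GENERAL_REGS uses die
   in it (death 3), and it touches no other pressure class.  No class gets
   worse, so the loop falls through and returns 2 - 3 = -1, i.e. INSN
   reduces pressure.  Had any class seen a set_increase greater than its
   deaths, the function would have returned 1 immediately.  */
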
3329 /* Return true if INSN1 should come before INSN2 in the model schedule.  */
3330 
3331 static int
3332 model_order_p (struct model_insn_info *insn1, struct model_insn_info *insn2)
3333 {
3334   unsigned int height1, height2;
3335   unsigned int priority1, priority2;
3336 
3337   /* Prefer instructions with a higher model priority.  */
3338   if (insn1->model_priority != insn2->model_priority)
3339     return insn1->model_priority > insn2->model_priority;
3340 
3341   /* Combine the length of the longest path of satisfied true dependencies
3342      that leads to each instruction (depth) with the length of the longest
3343      path of any dependencies that leads from the instruction (alap).
3344      Prefer instructions with the greatest combined length.  If the combined
3345      lengths are equal, prefer instructions with the greatest depth.
3346 
3347      The idea is that, if we have a set S of "equal" instructions that each
3348      have ALAP value X, and we pick one such instruction I, any true-dependent
3349      successors of I that have ALAP value X - 1 should be preferred over S.
3350      This encourages the schedule to be "narrow" rather than "wide".
3351      However, if I is a low-priority instruction that we decided to
3352      schedule because of its model_classify_pressure, and if there
3353      is a set of higher-priority instructions T, the aforementioned
3354      successors of I should not have the edge over T.  */
3355   height1 = insn1->depth + insn1->alap;
3356   height2 = insn2->depth + insn2->alap;
3357   if (height1 != height2)
3358     return height1 > height2;
3359   if (insn1->depth != insn2->depth)
3360     return insn1->depth > insn2->depth;
3361 
3362   /* We have no real preference between INSN1 and INSN2 as far as attempts
3363      to reduce pressure go.  Prefer instructions with higher priorities.  */
3364   priority1 = INSN_PRIORITY (insn1->insn);
3365   priority2 = INSN_PRIORITY (insn2->insn);
3366   if (priority1 != priority2)
3367     return priority1 > priority2;
3368 
3369   /* Use the original rtl sequence as a tie-breaker.  */
3370   return insn1 < insn2;
3371 }
3372 
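/* Worked example: take two insns with equal model_priority, where insn1
   has depth 4 and alap 2 and insn2 has depth 3 and alap 3.  Both have
   height 6, so the tie is broken on depth and insn1 is preferred; only if
   the depths were also equal would INSN_PRIORITY and finally the original
   rtl order decide.  */
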
3373 /* Add INSN to the model worklist immediately after PREV.  Add it to the
3374    beginning of the list if PREV is null.  */
3375 
3376 static void
3377 model_add_to_worklist_at (struct model_insn_info *insn,
3378 			  struct model_insn_info *prev)
3379 {
3380   gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_NOWHERE);
3381   QUEUE_INDEX (insn->insn) = QUEUE_READY;
3382 
3383   insn->prev = prev;
3384   if (prev)
3385     {
3386       insn->next = prev->next;
3387       prev->next = insn;
3388     }
3389   else
3390     {
3391       insn->next = model_worklist;
3392       model_worklist = insn;
3393     }
3394   if (insn->next)
3395     insn->next->prev = insn;
3396 }
3397 
3398 /* Remove INSN from the model worklist.  */
3399 
3400 static void
3401 model_remove_from_worklist (struct model_insn_info *insn)
3402 {
3403   gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_READY);
3404   QUEUE_INDEX (insn->insn) = QUEUE_NOWHERE;
3405 
3406   if (insn->prev)
3407     insn->prev->next = insn->next;
3408   else
3409     model_worklist = insn->next;
3410   if (insn->next)
3411     insn->next->prev = insn->prev;
3412 }
3413 
3414 /* Add INSN to the model worklist.  Start looking for a suitable position
3415    between neighbors PREV and NEXT, testing at most param_max_sched_ready_insns
3416    insns either side.  A null PREV indicates the beginning of the list and
3417    a null NEXT indicates the end.  */
3418 
3419 static void
3420 model_add_to_worklist (struct model_insn_info *insn,
3421 		       struct model_insn_info *prev,
3422 		       struct model_insn_info *next)
3423 {
3424   int count;
3425 
3426   count = param_max_sched_ready_insns;
3427   if (count > 0 && prev && model_order_p (insn, prev))
3428     do
3429       {
3430 	count--;
3431 	prev = prev->prev;
3432       }
3433     while (count > 0 && prev && model_order_p (insn, prev));
3434   else
3435     while (count > 0 && next && model_order_p (next, insn))
3436       {
3437 	count--;
3438 	prev = next;
3439 	next = next->next;
3440       }
3441   model_add_to_worklist_at (insn, prev);
3442 }
3443 
3444 /* INSN may now have a higher priority (in the model_order_p sense)
3445    than before.  Move it up the worklist if necessary.  */
3446 
3447 static void
3448 model_promote_insn (struct model_insn_info *insn)
3449 {
3450   struct model_insn_info *prev;
3451   int count;
3452 
3453   prev = insn->prev;
3454   count = param_max_sched_ready_insns;
3455   while (count > 0 && prev && model_order_p (insn, prev))
3456     {
3457       count--;
3458       prev = prev->prev;
3459     }
3460   if (prev != insn->prev)
3461     {
3462       model_remove_from_worklist (insn);
3463       model_add_to_worklist_at (insn, prev);
3464     }
3465 }
3466 
3467 /* Add INSN to the end of the model schedule.  */
3468 
3469 static void
3470 model_add_to_schedule (rtx_insn *insn)
3471 {
3472   unsigned int point;
3473 
3474   gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
3475   QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
3476 
3477   point = model_schedule.length ();
3478   model_schedule.quick_push (insn);
3479   INSN_MODEL_INDEX (insn) = point + 1;
3480 }
3481 
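/* The stored index is deliberately biased by one so that a zero
   INSN_MODEL_INDEX can mean "not in the model schedule"; the model_index
   accessor used elsewhere in this file is assumed to undo the bias, i.e.
   model_index (insn) == point and MODEL_INSN (point) == insn after the
   push above.  */
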
3482 /* Analyze the instructions that are to be scheduled, setting up
3483    MODEL_INSN_INFO (...) and model_num_insns accordingly.  Add ready
3484    instructions to model_worklist.  */
3485 
3486 static void
3487 model_analyze_insns (void)
3488 {
3489   rtx_insn *start, *end, *iter;
3490   sd_iterator_def sd_it;
3491   dep_t dep;
3492   struct model_insn_info *insn, *con;
3493 
3494   model_num_insns = 0;
3495   start = PREV_INSN (current_sched_info->next_tail);
3496   end = current_sched_info->prev_head;
3497   for (iter = start; iter != end; iter = PREV_INSN (iter))
3498     if (NONDEBUG_INSN_P (iter))
3499       {
3500 	insn = MODEL_INSN_INFO (iter);
3501 	insn->insn = iter;
3502 	FOR_EACH_DEP (iter, SD_LIST_FORW, sd_it, dep)
3503 	  {
3504 	    con = MODEL_INSN_INFO (DEP_CON (dep));
3505 	    if (con->insn && insn->alap < con->alap + 1)
3506 	      insn->alap = con->alap + 1;
3507 	  }
3508 
3509 	insn->old_queue = QUEUE_INDEX (iter);
3510 	QUEUE_INDEX (iter) = QUEUE_NOWHERE;
3511 
3512 	insn->unscheduled_preds = dep_list_size (iter, SD_LIST_HARD_BACK);
3513 	if (insn->unscheduled_preds == 0)
3514 	  model_add_to_worklist (insn, NULL, model_worklist);
3515 
3516 	model_num_insns++;
3517       }
3518 }
3519 
3520 /* The global state describes the register pressure at the start of the
3521    model schedule.  Initialize GROUP accordingly.  */
3522 
3523 static void
3524 model_init_pressure_group (struct model_pressure_group *group)
3525 {
3526   int pci, cl;
3527 
3528   for (pci = 0; pci < ira_pressure_classes_num; pci++)
3529     {
3530       cl = ira_pressure_classes[pci];
3531       group->limits[pci].pressure = curr_reg_pressure[cl];
3532       group->limits[pci].point = 0;
3533     }
3534   /* Use index model_num_insns to record the state after the last
3535      instruction in the model schedule.  */
3536   group->model = XNEWVEC (struct model_pressure_data,
3537 			  (model_num_insns + 1) * ira_pressure_classes_num);
3538 }
3539 
3540 /* Record that MODEL_REF_PRESSURE (GROUP, POINT, PCI) is PRESSURE.
3541    Update the maximum pressure for the whole schedule.  */
3542 
3543 static void
3544 model_record_pressure (struct model_pressure_group *group,
3545 		       int point, int pci, int pressure)
3546 {
3547   MODEL_REF_PRESSURE (group, point, pci) = pressure;
3548   if (group->limits[pci].pressure < pressure)
3549     {
3550       group->limits[pci].pressure = pressure;
3551       group->limits[pci].point = point;
3552     }
3553 }
3554 
3555 /* INSN has just been added to the end of the model schedule.  Record its
3556    register-pressure information.  */
3557 
3558 static void
3559 model_record_pressures (struct model_insn_info *insn)
3560 {
3561   struct reg_pressure_data *reg_pressure;
3562   int point, pci, cl, delta;
3563   int death[N_REG_CLASSES];
3564 
3565   point = model_index (insn->insn);
3566   if (sched_verbose >= 2)
3567     {
3568       if (point == 0)
3569 	{
3570 	  fprintf (sched_dump, "\n;;\tModel schedule:\n;;\n");
3571 	  fprintf (sched_dump, ";;\t| idx insn | mpri hght dpth prio |\n");
3572 	}
3573       fprintf (sched_dump, ";;\t| %3d %4d | %4d %4d %4d %4d | %-30s ",
3574 	       point, INSN_UID (insn->insn), insn->model_priority,
3575 	       insn->depth + insn->alap, insn->depth,
3576 	       INSN_PRIORITY (insn->insn),
3577 	       str_pattern_slim (PATTERN (insn->insn)));
3578     }
3579   calculate_reg_deaths (insn->insn, death);
3580   reg_pressure = INSN_REG_PRESSURE (insn->insn);
3581   for (pci = 0; pci < ira_pressure_classes_num; pci++)
3582     {
3583       cl = ira_pressure_classes[pci];
3584       delta = reg_pressure[pci].set_increase - death[cl];
3585       if (sched_verbose >= 2)
3586 	fprintf (sched_dump, " %s:[%d,%+d]", reg_class_names[cl],
3587 		 curr_reg_pressure[cl], delta);
3588       model_record_pressure (&model_before_pressure, point, pci,
3589 			     curr_reg_pressure[cl]);
3590     }
3591   if (sched_verbose >= 2)
3592     fprintf (sched_dump, "\n");
3593 }
3594 
3595 /* All instructions have been added to the model schedule.  Record the
3596    final register pressure in GROUP and set up all MODEL_MAX_PRESSUREs.  */
3597 
3598 static void
3599 model_record_final_pressures (struct model_pressure_group *group)
3600 {
3601   int point, pci, max_pressure, ref_pressure, cl;
3602 
3603   for (pci = 0; pci < ira_pressure_classes_num; pci++)
3604     {
3605       /* Record the final pressure for this class.  */
3606       cl = ira_pressure_classes[pci];
3607       point = model_num_insns;
3608       ref_pressure = curr_reg_pressure[cl];
3609       model_record_pressure (group, point, pci, ref_pressure);
3610 
3611       /* Record the original maximum pressure.  */
3612       group->limits[pci].orig_pressure = group->limits[pci].pressure;
3613 
3614       /* Update the MODEL_MAX_PRESSURE for every point of the schedule.  */
3615       max_pressure = ref_pressure;
3616       MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
3617       while (point > 0)
3618 	{
3619 	  point--;
3620 	  ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
3621 	  max_pressure = MAX (max_pressure, ref_pressure);
3622 	  MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
3623 	}
3624     }
3625 }
3626 
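/* Worked example with hypothetical numbers for one pressure class: if
   MODEL_REF_PRESSURE at points 0..4 is 2, 4, 3, 5, 1 (point 4 being the
   state after the last insn), the backward scan above yields
   MODEL_MAX_PRESSURE values 5, 5, 5, 5, 1, i.e. the maximum pressure at
   or after each point of the model schedule.  */
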
3627 /* Update all successors of INSN, given that INSN has just been scheduled.  */
3628 
3629 static void
3630 model_add_successors_to_worklist (struct model_insn_info *insn)
3631 {
3632   sd_iterator_def sd_it;
3633   struct model_insn_info *con;
3634   dep_t dep;
3635 
3636   FOR_EACH_DEP (insn->insn, SD_LIST_FORW, sd_it, dep)
3637     {
3638       con = MODEL_INSN_INFO (DEP_CON (dep));
3639       /* Ignore debug instructions, and instructions from other blocks.  */
3640       if (con->insn)
3641 	{
3642 	  con->unscheduled_preds--;
3643 
3644 	  /* Update the depth field of each true-dependent successor.
3645 	     Increasing the depth gives them a higher priority than
3646 	     before.  */
3647 	  if (DEP_TYPE (dep) == REG_DEP_TRUE && con->depth < insn->depth + 1)
3648 	    {
3649 	      con->depth = insn->depth + 1;
3650 	      if (QUEUE_INDEX (con->insn) == QUEUE_READY)
3651 		model_promote_insn (con);
3652 	    }
3653 
3654 	  /* If this is a true dependency, or if there are no remaining
3655 	     dependencies for CON (meaning that CON only had non-true
3656 	     dependencies), make sure that CON is on the worklist.
3657 	     We don't bother otherwise because it would tend to fill the
3658 	     worklist with a lot of low-priority instructions that are not
3659 	     yet ready to issue.  */
3660 	  if ((con->depth > 0 || con->unscheduled_preds == 0)
3661 	      && QUEUE_INDEX (con->insn) == QUEUE_NOWHERE)
3662 	    model_add_to_worklist (con, insn, insn->next);
3663 	}
3664     }
3665 }
3666 
3667 /* Give INSN a higher priority than any current instruction, then give
3668    unscheduled predecessors of INSN a higher priority still.  If any of
3669    those predecessors are not on the model worklist, do the same for its
3670    predecessors, and so on.  */
3671 
3672 static void
3673 model_promote_predecessors (struct model_insn_info *insn)
3674 {
3675   struct model_insn_info *pro, *first;
3676   sd_iterator_def sd_it;
3677   dep_t dep;
3678 
3679   if (sched_verbose >= 7)
3680     fprintf (sched_dump, ";;\t+--- priority of %d = %d, priority of",
3681 	     INSN_UID (insn->insn), model_next_priority);
3682   insn->model_priority = model_next_priority++;
3683   model_remove_from_worklist (insn);
3684   model_add_to_worklist_at (insn, NULL);
3685 
3686   first = NULL;
3687   for (;;)
3688     {
3689       FOR_EACH_DEP (insn->insn, SD_LIST_HARD_BACK, sd_it, dep)
3690 	{
3691 	  pro = MODEL_INSN_INFO (DEP_PRO (dep));
3692 	  /* The first test is to ignore debug instructions, and instructions
3693 	     from other blocks.  */
3694 	  if (pro->insn
3695 	      && pro->model_priority != model_next_priority
3696 	      && QUEUE_INDEX (pro->insn) != QUEUE_SCHEDULED)
3697 	    {
3698 	      pro->model_priority = model_next_priority;
3699 	      if (sched_verbose >= 7)
3700 		fprintf (sched_dump, " %d", INSN_UID (pro->insn));
3701 	      if (QUEUE_INDEX (pro->insn) == QUEUE_READY)
3702 		{
3703 		  /* PRO is already in the worklist, but it now has
3704 		     a higher priority than before.  Move it to the
3705 		     appropriate place.  */
3706 		  model_remove_from_worklist (pro);
3707 		  model_add_to_worklist (pro, NULL, model_worklist);
3708 		}
3709 	      else
3710 		{
3711 		  /* PRO isn't in the worklist.  Recursively process
3712 		     its predecessors until we find one that is.  */
3713 		  pro->next = first;
3714 		  first = pro;
3715 		}
3716 	    }
3717 	}
3718       if (!first)
3719 	break;
3720       insn = first;
3721       first = insn->next;
3722     }
3723   if (sched_verbose >= 7)
3724     fprintf (sched_dump, " = %d\n", model_next_priority);
3725   model_next_priority++;
3726 }
3727 
3728 /* Pick one instruction from model_worklist and process it.  */
3729 
3730 static void
3731 model_choose_insn (void)
3732 {
3733   struct model_insn_info *insn, *fallback;
3734   int count;
3735 
3736   if (sched_verbose >= 7)
3737     {
3738       fprintf (sched_dump, ";;\t+--- worklist:\n");
3739       insn = model_worklist;
3740       count = param_max_sched_ready_insns;
3741       while (count > 0 && insn)
3742 	{
3743 	  fprintf (sched_dump, ";;\t+---   %d [%d, %d, %d, %d]\n",
3744 		   INSN_UID (insn->insn), insn->model_priority,
3745 		   insn->depth + insn->alap, insn->depth,
3746 		   INSN_PRIORITY (insn->insn));
3747 	  count--;
3748 	  insn = insn->next;
3749 	}
3750     }
3751 
3752   /* Look for a ready instruction whose model_classify_pressure is zero
3753      or negative, picking the highest-priority one.  Adding such an
3754      instruction to the schedule now should do no harm, and may actually
3755      do some good.
3756 
3757      Failing that, see whether there is an instruction with the highest
3758      extant model_priority that is not yet ready, but which would reduce
3759      pressure if it became ready.  This is designed to catch cases like:
3760 
3761        (set (mem (reg R1)) (reg R2))
3762 
3763      where the instruction is the last remaining use of R1 and where the
3764      value of R2 is not yet available (or vice versa).  The death of R1
3765      means that this instruction already reduces pressure.  It is of
3766      course possible that the computation of R2 involves other registers
3767      that are hard to kill, but such cases are rare enough for this
3768      heuristic to be a win in general.
3769 
3770      Failing that, just pick the highest-priority instruction in the
3771      worklist.  */
3772   count = param_max_sched_ready_insns;
3773   insn = model_worklist;
3774   fallback = 0;
3775   for (;;)
3776     {
3777       if (count == 0 || !insn)
3778 	{
3779 	  insn = fallback ? fallback : model_worklist;
3780 	  break;
3781 	}
3782       if (insn->unscheduled_preds)
3783 	{
3784 	  if (model_worklist->model_priority == insn->model_priority
3785 	      && !fallback
3786 	      && model_classify_pressure (insn) < 0)
3787 	    fallback = insn;
3788 	}
3789       else
3790 	{
3791 	  if (model_classify_pressure (insn) <= 0)
3792 	    break;
3793 	}
3794       count--;
3795       insn = insn->next;
3796     }
3797 
3798   if (sched_verbose >= 7 && insn != model_worklist)
3799     {
3800       if (insn->unscheduled_preds)
3801 	fprintf (sched_dump, ";;\t+--- promoting insn %d, with dependencies\n",
3802 		 INSN_UID (insn->insn));
3803       else
3804 	fprintf (sched_dump, ";;\t+--- promoting insn %d, which is ready\n",
3805 		 INSN_UID (insn->insn));
3806     }
3807   if (insn->unscheduled_preds)
3808     /* INSN isn't yet ready to issue.  Give all its predecessors the
3809        highest priority.  */
3810     model_promote_predecessors (insn);
3811   else
3812     {
3813       /* INSN is ready.  Add it to the end of model_schedule and
3814 	 process its successors.  */
3815       model_add_successors_to_worklist (insn);
3816       model_remove_from_worklist (insn);
3817       model_add_to_schedule (insn->insn);
3818       model_record_pressures (insn);
3819       update_register_pressure (insn->insn);
3820     }
3821 }
3822 
3823 /* Restore all QUEUE_INDEXs to the values that they had before
3824    model_start_schedule was called.  */
3825 
3826 static void
3827 model_reset_queue_indices (void)
3828 {
3829   unsigned int i;
3830   rtx_insn *insn;
3831 
3832   FOR_EACH_VEC_ELT (model_schedule, i, insn)
3833     QUEUE_INDEX (insn) = MODEL_INSN_INFO (insn)->old_queue;
3834 }
3835 
3836 /* We have calculated the model schedule and spill costs.  Print a summary
3837    to sched_dump.  */
3838 
3839 static void
3840 model_dump_pressure_summary (void)
3841 {
3842   int pci, cl;
3843 
3844   fprintf (sched_dump, ";; Pressure summary:");
3845   for (pci = 0; pci < ira_pressure_classes_num; pci++)
3846     {
3847       cl = ira_pressure_classes[pci];
3848       fprintf (sched_dump, " %s:%d", reg_class_names[cl],
3849 	       model_before_pressure.limits[pci].pressure);
3850     }
3851   fprintf (sched_dump, "\n\n");
3852 }
3853 
3854 /* Initialize the SCHED_PRESSURE_MODEL information for the current
3855    scheduling region.  */
3856 
3857 static void
3858 model_start_schedule (basic_block bb)
3859 {
3860   model_next_priority = 1;
3861   model_schedule.create (sched_max_luid);
3862   model_insns = XCNEWVEC (struct model_insn_info, sched_max_luid);
3863 
3864   gcc_assert (bb == BLOCK_FOR_INSN (NEXT_INSN (current_sched_info->prev_head)));
3865   initiate_reg_pressure_info (df_get_live_in (bb));
3866 
3867   model_analyze_insns ();
3868   model_init_pressure_group (&model_before_pressure);
3869   while (model_worklist)
3870     model_choose_insn ();
3871   gcc_assert (model_num_insns == (int) model_schedule.length ());
3872   if (sched_verbose >= 2)
3873     fprintf (sched_dump, "\n");
3874 
3875   model_record_final_pressures (&model_before_pressure);
3876   model_reset_queue_indices ();
3877 
3878   XDELETEVEC (model_insns);
3879 
3880   model_curr_point = 0;
3881   initiate_reg_pressure_info (df_get_live_in (bb));
3882   if (sched_verbose >= 1)
3883     model_dump_pressure_summary ();
3884 }
3885 
3886 /* Free the information associated with GROUP.  */
3887 
3888 static void
3889 model_finalize_pressure_group (struct model_pressure_group *group)
3890 {
3891   XDELETEVEC (group->model);
3892 }
3893 
3894 /* Free the information created by model_start_schedule.  */
3895 
3896 static void
3897 model_end_schedule (void)
3898 {
3899   model_finalize_pressure_group (&model_before_pressure);
3900   model_schedule.release ();
3901 }
3902 
3903 /* Prepare reg pressure scheduling for basic block BB.  */
3904 static void
3905 sched_pressure_start_bb (basic_block bb)
3906 {
3907   /* Set the number of available registers for each class taking into account
3908      relative probability of current basic block versus function prologue and
3909      epilogue.
3910      * If the basic block executes much more often than the prologue/epilogue
3911      (e.g., inside a hot loop), then cost of spill in the prologue is close to
3912      nil, so the effective number of available registers is
3913      (ira_class_hard_regs_num[cl] - fixed_regs_num[cl] - 0).
3914      * If the basic block executes as often as the prologue/epilogue,
3915      then spill in the block is as costly as in the prologue, so the effective
3916      number of available registers is
3917      (ira_class_hard_regs_num[cl] - fixed_regs_num[cl]
3918       - call_saved_regs_num[cl]).
3919      Note that all-else-equal, we prefer to spill in the prologue, since that
3920      allows "extra" registers for other basic blocks of the function.
3921      * If the basic block is on the cold path of the function and executes
3922      rarely, then we should always prefer to spill in the block, rather than
3923      in the prologue/epilogue.  The effective number of available register is
3924      (ira_class_hard_regs_num[cl] - fixed_regs_num[cl]
3925       - call_saved_regs_num[cl]).  */
3926   {
3927     int i;
3928     int entry_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.to_frequency (cfun);
3929     int bb_freq = bb->count.to_frequency (cfun);
3930 
3931     if (bb_freq == 0)
3932       {
3933 	if (entry_freq == 0)
3934 	  entry_freq = bb_freq = 1;
3935       }
3936     if (bb_freq < entry_freq)
3937       bb_freq = entry_freq;
3938 
3939     for (i = 0; i < ira_pressure_classes_num; ++i)
3940       {
3941 	enum reg_class cl = ira_pressure_classes[i];
3942 	sched_class_regs_num[cl] = ira_class_hard_regs_num[cl]
3943 				   - fixed_regs_num[cl];
3944 	sched_class_regs_num[cl]
3945 	  -= (call_saved_regs_num[cl] * entry_freq) / bb_freq;
3946       }
3947   }
3948 
3949   if (sched_pressure == SCHED_PRESSURE_MODEL)
3950     model_start_schedule (bb);
3951 }
3952 
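/* Worked example with hypothetical numbers: take a pressure class with
   ira_class_hard_regs_num 16, fixed_regs_num 2 and call_saved_regs_num 8.
   If the block is as frequent as the prologue (bb_freq == entry_freq),
   sched_class_regs_num becomes 16 - 2 - 8 = 6.  If the block runs four
   times as often (bb_freq == 4 * entry_freq), the call-saved penalty
   scales down to 8 / 4 = 2 and the effective number of registers is
   16 - 2 - 2 = 12.  */
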
3953 /* A structure that holds local state for the loop in schedule_block.  */
3954 struct sched_block_state
3955 {
3956   /* True if no real insns have been scheduled in the current cycle.  */
3957   bool first_cycle_insn_p;
3958   /* True if a shadow insn has been scheduled in the current cycle, which
3959      means that no more normal insns can be issued.  */
3960   bool shadows_only_p;
3961   /* True if we're winding down a modulo schedule, which means that we only
3962      issue insns with INSN_EXACT_TICK set.  */
3963   bool modulo_epilogue;
3964   /* Initialized with the machine's issue rate every cycle, and updated
3965      by calls to the variable_issue hook.  */
3966   int can_issue_more;
3967 };
3968 
3969 /* INSN is the "currently executing insn".  Launch each insn which was
3970    waiting on INSN.  READY is the ready list which contains the insns
3971    that are ready to fire.  CLOCK is the current cycle.  The function
3972    returns the necessary cycle advance after issuing the insn (it is not
3973    zero for insns in a schedule group).  */
3974 
3975 static int
3976 schedule_insn (rtx_insn *insn)
3977 {
3978   sd_iterator_def sd_it;
3979   dep_t dep;
3980   int i;
3981   int advance = 0;
3982 
3983   if (sched_verbose >= 1)
3984     {
3985       struct reg_pressure_data *pressure_info;
3986       fprintf (sched_dump, ";;\t%3i--> %s %-40s:",
3987 	       clock_var, (*current_sched_info->print_insn) (insn, 1),
3988 	       str_pattern_slim (PATTERN (insn)));
3989 
3990       if (recog_memoized (insn) < 0)
3991 	fprintf (sched_dump, "nothing");
3992       else
3993 	print_reservation (sched_dump, insn);
3994       pressure_info = INSN_REG_PRESSURE (insn);
3995       if (pressure_info != NULL)
3996 	{
3997 	  fputc (':', sched_dump);
3998 	  for (i = 0; i < ira_pressure_classes_num; i++)
3999 	    fprintf (sched_dump, "%s%s%+d(%d)",
4000 		     scheduled_insns.length () > 1
4001 		     && INSN_LUID (insn)
4002 		     < INSN_LUID (scheduled_insns[scheduled_insns.length () - 2]) ? "@" : "",
4003 		     reg_class_names[ira_pressure_classes[i]],
4004 		     pressure_info[i].set_increase, pressure_info[i].change);
4005 	}
4006       if (sched_pressure == SCHED_PRESSURE_MODEL
4007 	  && model_curr_point < model_num_insns
4008 	  && model_index (insn) == model_curr_point)
4009 	fprintf (sched_dump, ":model %d", model_curr_point);
4010       fputc ('\n', sched_dump);
4011     }
4012 
4013   if (sched_pressure == SCHED_PRESSURE_WEIGHTED && !DEBUG_INSN_P (insn))
4014     update_reg_and_insn_max_reg_pressure (insn);
4015 
4016   /* The insn being scheduled should have all its dependencies resolved
4017      and should have been removed from the ready list.  */
4018   gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
4019 
4020   /* Reset debug insns invalidated by moving this insn.  */
4021   if (MAY_HAVE_DEBUG_BIND_INSNS && !DEBUG_INSN_P (insn))
4022     for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
4023 	 sd_iterator_cond (&sd_it, &dep);)
4024       {
4025 	rtx_insn *dbg = DEP_PRO (dep);
4026 	struct reg_use_data *use, *next;
4027 
4028 	if (DEP_STATUS (dep) & DEP_CANCELLED)
4029 	  {
4030 	    sd_iterator_next (&sd_it);
4031 	    continue;
4032 	  }
4033 
4034 	gcc_assert (DEBUG_BIND_INSN_P (dbg));
4035 
4036 	if (sched_verbose >= 6)
4037 	  fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
4038 		   INSN_UID (dbg));
4039 
4040 	/* ??? Rather than resetting the debug insn, we might be able
4041 	   to emit a debug temp before the just-scheduled insn, but
4042 	   this would involve checking that the expression at the
4043 	   point of the debug insn is equivalent to the expression
4044 	   before the just-scheduled insn.  They might not be: the
4045 	   expression in the debug insn may depend on other insns not
4046 	   yet scheduled that set MEMs, REGs or even other debug
4047 	   insns.  It's not clear that attempting to preserve debug
4048 	   information in these cases is worth the effort, given how
4049 	   uncommon these resets are and the likelihood that the debug
4050 	   temps introduced won't survive the schedule change.  */
4051 	INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
4052 	df_insn_rescan (dbg);
4053 
4054 	/* Unknown location doesn't use any registers.  */
4055 	for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
4056 	  {
4057 	    struct reg_use_data *prev = use;
4058 
4059 	    /* Remove use from the cyclic next_regno_use chain first.  */
4060 	    while (prev->next_regno_use != use)
4061 	      prev = prev->next_regno_use;
4062 	    prev->next_regno_use = use->next_regno_use;
4063 	    next = use->next_insn_use;
4064 	    free (use);
4065 	  }
4066 	INSN_REG_USE_LIST (dbg) = NULL;
4067 
4068 	/* We delete rather than resolve these deps, otherwise we
4069 	   crash in sched_free_deps(), because forward deps are
4070 	   expected to be released before backward deps.  */
4071 	sd_delete_dep (sd_it);
4072       }
4073 
4074   gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
4075   QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
4076 
4077   if (sched_pressure == SCHED_PRESSURE_MODEL
4078       && model_curr_point < model_num_insns
4079       && NONDEBUG_INSN_P (insn))
4080     {
4081       if (model_index (insn) == model_curr_point)
4082 	do
4083 	  model_curr_point++;
4084 	while (model_curr_point < model_num_insns
4085 	       && (QUEUE_INDEX (MODEL_INSN (model_curr_point))
4086 		   == QUEUE_SCHEDULED));
4087       else
4088 	model_recompute (insn);
4089       model_update_limit_points ();
4090       update_register_pressure (insn);
4091       if (sched_verbose >= 2)
4092 	print_curr_reg_pressure ();
4093     }
4094 
4095   gcc_assert (INSN_TICK (insn) >= MIN_TICK);
4096   if (INSN_TICK (insn) > clock_var)
4097     /* INSN has been prematurely moved from the queue to the ready list.
4098        This is possible only if the following flags are set.  */
4099     gcc_assert (flag_sched_stalled_insns || sched_fusion);
4100 
4101   /* ??? Probably, if INSN is scheduled prematurely, we should leave
4102      INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
4103   INSN_TICK (insn) = clock_var;
4104 
4105   check_clobbered_conditions (insn);
4106 
4107   /* Update dependent instructions.  First, see if by scheduling this insn
4108      now we broke a dependence in a way that requires us to change another
4109      insn.  */
4110   for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4111        sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
4112     {
4113       struct dep_replacement *desc = DEP_REPLACE (dep);
4114       rtx_insn *pro = DEP_PRO (dep);
4115       if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED
4116 	  && desc != NULL && desc->insn == pro)
4117 	apply_replacement (dep, false);
4118     }
4119 
4120   /* Go through and resolve forward dependencies.  */
4121   for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4122        sd_iterator_cond (&sd_it, &dep);)
4123     {
4124       rtx_insn *next = DEP_CON (dep);
4125       bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
4126 
4127       /* Resolve the dependence between INSN and NEXT.
4128 	 sd_resolve_dep () moves current dep to another list thus
4129 	 advancing the iterator.  */
4130       sd_resolve_dep (sd_it);
4131 
4132       if (cancelled)
4133 	{
4134 	  if (must_restore_pattern_p (next, dep))
4135 	    restore_pattern (dep, false);
4136 	  continue;
4137 	}
4138 
4139       /* Don't bother trying to mark next as ready if insn is a debug
4140 	 insn.  If insn is the last hard dependency, it will have
4141 	 already been discounted.  */
4142       if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
4143 	continue;
4144 
4145       if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4146 	{
4147 	  int effective_cost;
4148 
4149 	  effective_cost = try_ready (next);
4150 
4151 	  if (effective_cost >= 0
4152 	      && SCHED_GROUP_P (next)
4153 	      && advance < effective_cost)
4154 	    advance = effective_cost;
4155 	}
4156       else
4157 	/* Check always has only one forward dependence (to the first insn in
4158 	   the recovery block), therefore, this will be executed only once.  */
4159 	{
4160 	  gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
4161 	  fix_recovery_deps (RECOVERY_BLOCK (insn));
4162 	}
4163     }
4164 
4165   /* Annotate the instruction with issue information -- TImode
4166      indicates that the instruction is expected not to be able
4167      to issue on the same cycle as the previous insn.  A machine
4168      may use this information to decide how the instruction should
4169      be aligned.  */
4170   if (issue_rate > 1
4171       && GET_CODE (PATTERN (insn)) != USE
4172       && GET_CODE (PATTERN (insn)) != CLOBBER
4173       && !DEBUG_INSN_P (insn))
4174     {
4175       if (reload_completed)
4176 	PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
4177       last_clock_var = clock_var;
4178     }
4179 
4180   if (nonscheduled_insns_begin != NULL_RTX)
4181     /* Indicate to debug counters that INSN is scheduled.  */
4182     nonscheduled_insns_begin = insn;
4183 
4184   return advance;
4185 }
4186 
4187 /* Functions for handling of notes.  */
4188 
4189 /* Add note list that ends on FROM_END to the end of TO_ENDP.  */
4190 void
4191 concat_note_lists (rtx_insn *from_end, rtx_insn **to_endp)
4192 {
4193   rtx_insn *from_start;
4194 
4195   /* It's easy when we have nothing to concat.  */
4196   if (from_end == NULL)
4197     return;
4198 
4199   /* It's also easy when the destination is empty.  */
4200   if (*to_endp == NULL)
4201     {
4202       *to_endp = from_end;
4203       return;
4204     }
4205 
4206   from_start = from_end;
4207   while (PREV_INSN (from_start) != NULL)
4208     from_start = PREV_INSN (from_start);
4209 
4210   SET_PREV_INSN (from_start) = *to_endp;
4211   SET_NEXT_INSN (*to_endp) = from_start;
4212   *to_endp = from_end;
4213 }
4214 
4215 /* Delete notes between HEAD and TAIL and put them in the chain
4216    of notes ended by NOTE_LIST.  */
4217 void
4218 remove_notes (rtx_insn *head, rtx_insn *tail)
4219 {
4220   rtx_insn *next_tail, *insn, *next;
4221 
4222   note_list = 0;
4223   if (head == tail && !INSN_P (head))
4224     return;
4225 
4226   next_tail = NEXT_INSN (tail);
4227   for (insn = head; insn != next_tail; insn = next)
4228     {
4229       next = NEXT_INSN (insn);
4230       if (!NOTE_P (insn))
4231 	continue;
4232 
4233       switch (NOTE_KIND (insn))
4234 	{
4235 	case NOTE_INSN_BASIC_BLOCK:
4236 	  continue;
4237 
4238 	case NOTE_INSN_EPILOGUE_BEG:
4239 	  if (insn != tail)
4240 	    {
4241 	      remove_insn (insn);
4242 	      /* If an insn was split just before the EPILOGUE_BEG note and
4243 		 that split created new basic blocks, we could have a
4244 		 BASIC_BLOCK note here.  Safely advance over it in that case
4245 		 and assert that we land on a real insn.  */
4246 	      if (NOTE_P (next)
4247 		  && NOTE_KIND (next) == NOTE_INSN_BASIC_BLOCK
4248 		  && next != next_tail)
4249 		next = NEXT_INSN (next);
4250 	      gcc_assert (INSN_P (next));
4251 	      add_reg_note (next, REG_SAVE_NOTE,
4252 			    GEN_INT (NOTE_INSN_EPILOGUE_BEG));
4253 	      break;
4254 	    }
4255 	  /* FALLTHRU */
4256 
4257 	default:
4258 	  remove_insn (insn);
4259 
4260 	  /* Add the note to list that ends at NOTE_LIST.  */
4261 	  SET_PREV_INSN (insn) = note_list;
4262 	  SET_NEXT_INSN (insn) = NULL_RTX;
4263 	  if (note_list)
4264 	    SET_NEXT_INSN (note_list) = insn;
4265 	  note_list = insn;
4266 	  break;
4267 	}
4268 
4269       gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
4270     }
4271 }
4272 
4273 /* A structure to record enough data to allow us to backtrack the scheduler to
4274    a previous state.  */
4275 struct haifa_saved_data
4276 {
4277   /* Next entry on the list.  */
4278   struct haifa_saved_data *next;
4279 
4280   /* Backtracking is associated with scheduling insns that have delay slots.
4281      DELAY_PAIR points to the structure that contains the insns involved, and
4282      the number of cycles between them.  */
4283   struct delay_pair *delay_pair;
4284 
4285   /* Data used by the frontend (e.g. sched-ebb or sched-rgn).  */
4286   void *fe_saved_data;
4287   /* Data used by the backend.  */
4288   void *be_saved_data;
4289 
4290   /* Copies of global state.  */
4291   int clock_var, last_clock_var;
4292   struct ready_list ready;
4293   state_t curr_state;
4294 
4295   rtx_insn *last_scheduled_insn;
4296   rtx_insn *last_nondebug_scheduled_insn;
4297   rtx_insn *nonscheduled_insns_begin;
4298   int cycle_issued_insns;
4299 
4300   /* Copies of state used in the inner loop of schedule_block.  */
4301   struct sched_block_state sched_block;
4302 
4303   /* We don't need to save q_ptr, as its value is arbitrary and we can set it
4304      to 0 when restoring.  */
4305   int q_size;
4306   rtx_insn_list **insn_queue;
4307 
4308   /* Describe pattern replacements that occurred since this backtrack point
4309      was queued.  */
4310   vec<dep_t> replacement_deps;
4311   vec<int> replace_apply;
4312 
4313   /* A copy of the next-cycle replacement vectors at the time of the backtrack
4314      point.  */
4315   vec<dep_t> next_cycle_deps;
4316   vec<int> next_cycle_apply;
4317 };
4318 
4319 /* A record, in reverse order, of all scheduled insns which have delay slots
4320    and may require backtracking.  */
4321 static struct haifa_saved_data *backtrack_queue;
4322 
4323 /* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
4324    to SET_P.  */
4325 static void
4326 mark_backtrack_feeds (rtx_insn *insn, int set_p)
4327 {
4328   sd_iterator_def sd_it;
4329   dep_t dep;
4330   FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
4331     {
4332       FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
4333     }
4334 }
4335 
4336 /* Save the current scheduler state so that we can backtrack to it
4337    later if necessary.  PAIR gives the insns that make it necessary to
4338    save this point.  SCHED_BLOCK is the local state of schedule_block
4339    that needs to be saved.  */
4340 static void
4341 save_backtrack_point (struct delay_pair *pair,
4342 		      struct sched_block_state sched_block)
4343 {
4344   int i;
4345   struct haifa_saved_data *save = XNEW (struct haifa_saved_data);
4346 
4347   save->curr_state = xmalloc (dfa_state_size);
4348   memcpy (save->curr_state, curr_state, dfa_state_size);
4349 
4350   save->ready.first = ready.first;
4351   save->ready.n_ready = ready.n_ready;
4352   save->ready.n_debug = ready.n_debug;
4353   save->ready.veclen = ready.veclen;
4354   save->ready.vec = XNEWVEC (rtx_insn *, ready.veclen);
4355   memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));
4356 
4357   save->insn_queue = XNEWVEC (rtx_insn_list *, max_insn_queue_index + 1);
4358   save->q_size = q_size;
4359   for (i = 0; i <= max_insn_queue_index; i++)
4360     {
4361       int q = NEXT_Q_AFTER (q_ptr, i);
4362       save->insn_queue[i] = copy_INSN_LIST (insn_queue[q]);
4363     }
4364 
4365   save->clock_var = clock_var;
4366   save->last_clock_var = last_clock_var;
4367   save->cycle_issued_insns = cycle_issued_insns;
4368   save->last_scheduled_insn = last_scheduled_insn;
4369   save->last_nondebug_scheduled_insn = last_nondebug_scheduled_insn;
4370   save->nonscheduled_insns_begin = nonscheduled_insns_begin;
4371 
4372   save->sched_block = sched_block;
4373 
4374   save->replacement_deps.create (0);
4375   save->replace_apply.create (0);
4376   save->next_cycle_deps = next_cycle_replace_deps.copy ();
4377   save->next_cycle_apply = next_cycle_apply.copy ();
4378 
4379   if (current_sched_info->save_state)
4380     save->fe_saved_data = (*current_sched_info->save_state) ();
4381 
4382   if (targetm.sched.alloc_sched_context)
4383     {
4384       save->be_saved_data = targetm.sched.alloc_sched_context ();
4385       targetm.sched.init_sched_context (save->be_saved_data, false);
4386     }
4387   else
4388     save->be_saved_data = NULL;
4389 
4390   save->delay_pair = pair;
4391 
4392   save->next = backtrack_queue;
4393   backtrack_queue = save;
4394 
4395   while (pair)
4396     {
4397       mark_backtrack_feeds (pair->i2, 1);
4398       INSN_TICK (pair->i2) = INVALID_TICK;
4399       INSN_EXACT_TICK (pair->i2) = clock_var + pair_delay (pair);
4400       SHADOW_P (pair->i2) = pair->stages == 0;
4401       pair = pair->next_same_i1;
4402     }
4403 }
4404 
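/* Worked example with hypothetical numbers: suppose a delay pair (I1, I2)
   with a delay of 3 cycles is scheduled, I1 issuing at clock_var 10.  The
   loop above then records INSN_EXACT_TICK (I2) = 13, and if I2 only
   becomes ready once clock_var has already passed 13, the ready-list
   insertion code earlier in this file sets must_backtrack and the state
   saved here is restored.  */
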
4405 /* Walk the ready list and all queues. If any insns have unresolved backwards
4406    dependencies, these must be cancelled deps, broken by predication.  Set or
4407    clear (depending on SET) the DEP_CANCELLED bit in DEP_STATUS.  */
4408 
4409 static void
4410 toggle_cancelled_flags (bool set)
4411 {
4412   int i;
4413   sd_iterator_def sd_it;
4414   dep_t dep;
4415 
4416   if (ready.n_ready > 0)
4417     {
4418       rtx_insn **first = ready_lastpos (&ready);
4419       for (i = 0; i < ready.n_ready; i++)
4420 	FOR_EACH_DEP (first[i], SD_LIST_BACK, sd_it, dep)
4421 	  if (!DEBUG_INSN_P (DEP_PRO (dep)))
4422 	    {
4423 	      if (set)
4424 		DEP_STATUS (dep) |= DEP_CANCELLED;
4425 	      else
4426 		DEP_STATUS (dep) &= ~DEP_CANCELLED;
4427 	    }
4428     }
4429   for (i = 0; i <= max_insn_queue_index; i++)
4430     {
4431       int q = NEXT_Q_AFTER (q_ptr, i);
4432       rtx_insn_list *link;
4433       for (link = insn_queue[q]; link; link = link->next ())
4434 	{
4435 	  rtx_insn *insn = link->insn ();
4436 	  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4437 	    if (!DEBUG_INSN_P (DEP_PRO (dep)))
4438 	      {
4439 		if (set)
4440 		  DEP_STATUS (dep) |= DEP_CANCELLED;
4441 		else
4442 		  DEP_STATUS (dep) &= ~DEP_CANCELLED;
4443 	      }
4444 	}
4445     }
4446 }
4447 
4448 /* Undo the replacements that have occurred after backtrack point SAVE
4449    was placed.  */
4450 static void
4451 undo_replacements_for_backtrack (struct haifa_saved_data *save)
4452 {
4453   while (!save->replacement_deps.is_empty ())
4454     {
4455       dep_t dep = save->replacement_deps.pop ();
4456       int apply_p = save->replace_apply.pop ();
4457 
4458       if (apply_p)
4459 	restore_pattern (dep, true);
4460       else
4461 	apply_replacement (dep, true);
4462     }
4463   save->replacement_deps.release ();
4464   save->replace_apply.release ();
4465 }
4466 
4467 /* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
4468    Restore their dependencies to an unresolved state, and mark them as
4469    queued nowhere.  */
4470 
4471 static void
4472 unschedule_insns_until (rtx_insn *insn)
4473 {
4474   auto_vec<rtx_insn *> recompute_vec;
4475 
4476   /* Make two passes over the insns to be unscheduled.  First, we clear out
4477      dependencies and other trivial bookkeeping.  */
4478   for (;;)
4479     {
4480       rtx_insn *last;
4481       sd_iterator_def sd_it;
4482       dep_t dep;
4483 
4484       last = scheduled_insns.pop ();
4485 
4486       /* This will be changed by restore_backtrack_point if the insn is in
4487 	 any queue.  */
4488       QUEUE_INDEX (last) = QUEUE_NOWHERE;
4489       if (last != insn)
4490 	INSN_TICK (last) = INVALID_TICK;
4491 
4492       if (modulo_ii > 0 && INSN_UID (last) < modulo_iter0_max_uid)
4493 	modulo_insns_scheduled--;
4494 
4495       for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
4496 	   sd_iterator_cond (&sd_it, &dep);)
4497 	{
4498 	  rtx_insn *con = DEP_CON (dep);
4499 	  sd_unresolve_dep (sd_it);
4500 	  if (!MUST_RECOMPUTE_SPEC_P (con))
4501 	    {
4502 	      MUST_RECOMPUTE_SPEC_P (con) = 1;
4503 	      recompute_vec.safe_push (con);
4504 	    }
4505 	}
4506 
4507       if (last == insn)
4508 	break;
4509     }
4510 
4511   /* A second pass, to update ready and speculation status for insns
4512      depending on the unscheduled ones.  The first pass must have
4513      popped the scheduled_insns vector up to the point where we
4514      restart scheduling, as recompute_todo_spec requires it to be
4515      up-to-date.  */
4516   while (!recompute_vec.is_empty ())
4517     {
4518       rtx_insn *con;
4519 
4520       con = recompute_vec.pop ();
4521       MUST_RECOMPUTE_SPEC_P (con) = 0;
4522       if (!sd_lists_empty_p (con, SD_LIST_HARD_BACK))
4523 	{
4524 	  TODO_SPEC (con) = HARD_DEP;
4525 	  INSN_TICK (con) = INVALID_TICK;
4526 	  if (PREDICATED_PAT (con) != NULL_RTX)
4527 	    haifa_change_pattern (con, ORIG_PAT (con));
4528 	}
4529       else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
4530 	TODO_SPEC (con) = recompute_todo_spec (con, true);
4531     }
4532 }
4533 
4534 /* Restore scheduler state from the topmost entry on the backtracking queue.
4535    PSCHED_BLOCK_P points to the local data of schedule_block that we must
4536    overwrite with the saved data.
4537    The caller must already have called unschedule_insns_until.  */
4538 
4539 static void
4540 restore_last_backtrack_point (struct sched_block_state *psched_block)
4541 {
4542   int i;
4543   struct haifa_saved_data *save = backtrack_queue;
4544 
4545   backtrack_queue = save->next;
4546 
4547   if (current_sched_info->restore_state)
4548     (*current_sched_info->restore_state) (save->fe_saved_data);
4549 
4550   if (targetm.sched.alloc_sched_context)
4551     {
4552       targetm.sched.set_sched_context (save->be_saved_data);
4553       targetm.sched.free_sched_context (save->be_saved_data);
4554     }
4555 
4556   /* Do this first since it clobbers INSN_TICK of the involved
4557      instructions.  */
4558   undo_replacements_for_backtrack (save);
4559 
4560   /* Clear the QUEUE_INDEX of everything in the ready list or one
4561      of the queues.  */
4562   if (ready.n_ready > 0)
4563     {
4564       rtx_insn **first = ready_lastpos (&ready);
4565       for (i = 0; i < ready.n_ready; i++)
4566 	{
4567 	  rtx_insn *insn = first[i];
4568 	  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
4569 	  INSN_TICK (insn) = INVALID_TICK;
4570 	}
4571     }
4572   for (i = 0; i <= max_insn_queue_index; i++)
4573     {
4574       int q = NEXT_Q_AFTER (q_ptr, i);
4575 
4576       for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
4577 	{
4578 	  rtx_insn *x = link->insn ();
4579 	  QUEUE_INDEX (x) = QUEUE_NOWHERE;
4580 	  INSN_TICK (x) = INVALID_TICK;
4581 	}
4582       free_INSN_LIST_list (&insn_queue[q]);
4583     }
4584 
4585   free (ready.vec);
4586   ready = save->ready;
4587 
4588   if (ready.n_ready > 0)
4589     {
4590       rtx_insn **first = ready_lastpos (&ready);
4591       for (i = 0; i < ready.n_ready; i++)
4592 	{
4593 	  rtx_insn *insn = first[i];
4594 	  QUEUE_INDEX (insn) = QUEUE_READY;
4595 	  TODO_SPEC (insn) = recompute_todo_spec (insn, true);
4596 	  INSN_TICK (insn) = save->clock_var;
4597 	}
4598     }
4599 
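  /* With q_ptr reset to zero, the queue slot visited as NEXT_Q_AFTER (q_ptr, i)
     corresponds to a delay of exactly i cycles, which is why the loop below
     can set QUEUE_INDEX to i and INSN_TICK to save->clock_var + i directly.  */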
4600   q_ptr = 0;
4601   q_size = save->q_size;
4602   for (i = 0; i <= max_insn_queue_index; i++)
4603     {
4604       int q = NEXT_Q_AFTER (q_ptr, i);
4605 
4606       insn_queue[q] = save->insn_queue[q];
4607 
4608       for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
4609 	{
4610 	  rtx_insn *x = link->insn ();
4611 	  QUEUE_INDEX (x) = i;
4612 	  TODO_SPEC (x) = recompute_todo_spec (x, true);
4613 	  INSN_TICK (x) = save->clock_var + i;
4614 	}
4615     }
4616   free (save->insn_queue);
4617 
4618   toggle_cancelled_flags (true);
4619 
4620   clock_var = save->clock_var;
4621   last_clock_var = save->last_clock_var;
4622   cycle_issued_insns = save->cycle_issued_insns;
4623   last_scheduled_insn = save->last_scheduled_insn;
4624   last_nondebug_scheduled_insn = save->last_nondebug_scheduled_insn;
4625   nonscheduled_insns_begin = save->nonscheduled_insns_begin;
4626 
4627   *psched_block = save->sched_block;
4628 
4629   memcpy (curr_state, save->curr_state, dfa_state_size);
4630   free (save->curr_state);
4631 
4632   mark_backtrack_feeds (save->delay_pair->i2, 0);
4633 
4634   gcc_assert (next_cycle_replace_deps.is_empty ());
4635   next_cycle_replace_deps = save->next_cycle_deps.copy ();
4636   next_cycle_apply = save->next_cycle_apply.copy ();
4637 
4638   free (save);
4639 
4640   for (save = backtrack_queue; save; save = save->next)
4641     {
4642       mark_backtrack_feeds (save->delay_pair->i2, 1);
4643     }
4644 }
4645 
4646 /* Discard all data associated with the topmost entry in the backtrack
4647    queue.  If RESET_TICK is false, we just want to free the data.  If true,
4648    we are doing this because we discovered a reason to backtrack.  In the
4649    latter case, also reset the INSN_TICK for the shadow insn.  */
4650 static void
4651 free_topmost_backtrack_point (bool reset_tick)
4652 {
4653   struct haifa_saved_data *save = backtrack_queue;
4654   int i;
4655 
4656   backtrack_queue = save->next;
4657 
4658   if (reset_tick)
4659     {
4660       struct delay_pair *pair = save->delay_pair;
4661       while (pair)
4662 	{
4663 	  INSN_TICK (pair->i2) = INVALID_TICK;
4664 	  INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
4665 	  pair = pair->next_same_i1;
4666 	}
4667       undo_replacements_for_backtrack (save);
4668     }
4669   else
4670     {
4671       save->replacement_deps.release ();
4672       save->replace_apply.release ();
4673     }
4674 
4675   if (targetm.sched.free_sched_context)
4676     targetm.sched.free_sched_context (save->be_saved_data);
4677   if (current_sched_info->restore_state)
4678     free (save->fe_saved_data);
4679   for (i = 0; i <= max_insn_queue_index; i++)
4680     free_INSN_LIST_list (&save->insn_queue[i]);
4681   free (save->insn_queue);
4682   free (save->curr_state);
4683   free (save->ready.vec);
4684   free (save);
4685 }
4686 
4687 /* Free the entire backtrack queue.  */
4688 static void
4689 free_backtrack_queue (void)
4690 {
4691   while (backtrack_queue)
4692     free_topmost_backtrack_point (false);
4693 }
4694 
4695 /* Apply a replacement described by DESC.  If IMMEDIATELY is false, we
4696    may have to postpone the replacement until the start of the next cycle,
4697    at which point we will be called again with IMMEDIATELY true.  This is
4698    only done for machines that have instruction packets with explicit
4699    parallelism.  */
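/* Note: deps postponed by this function are pushed onto
   next_cycle_replace_deps / next_cycle_apply (with an apply flag of 1) and
   are drained by perform_replacements_new_cycle below.  */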
4700 static void
4701 apply_replacement (dep_t dep, bool immediately)
4702 {
4703   struct dep_replacement *desc = DEP_REPLACE (dep);
4704   if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4705     {
4706       next_cycle_replace_deps.safe_push (dep);
4707       next_cycle_apply.safe_push (1);
4708     }
4709   else
4710     {
4711       bool success;
4712 
4713       if (QUEUE_INDEX (desc->insn) == QUEUE_SCHEDULED)
4714 	return;
4715 
4716       if (sched_verbose >= 5)
4717 	fprintf (sched_dump, "applying replacement for insn %d\n",
4718 		 INSN_UID (desc->insn));
4719 
4720       success = validate_change (desc->insn, desc->loc, desc->newval, 0);
4721       gcc_assert (success);
4722 
4723       rtx_insn *insn = DEP_PRO (dep);
4724 
4725       /* Recompute priority since dependent priorities may have changed.  */
4726       priority (insn, true);
4727       update_insn_after_change (desc->insn);
4728 
4729       if ((TODO_SPEC (desc->insn) & (HARD_DEP | DEP_POSTPONED)) == 0)
4730 	fix_tick_ready (desc->insn);
4731 
4732       if (backtrack_queue != NULL)
4733 	{
4734 	  backtrack_queue->replacement_deps.safe_push (dep);
4735 	  backtrack_queue->replace_apply.safe_push (1);
4736 	}
4737     }
4738 }
4739 
4740 /* We have determined that a pattern involved in DEP must be restored.
4741    If IMMEDIATELY is false, we may have to postpone the replacement
4742    until the start of the next cycle, at which point we will be called
4743    again with IMMEDIATELY true.  */
4744 static void
4745 restore_pattern (dep_t dep, bool immediately)
4746 {
4747   rtx_insn *next = DEP_CON (dep);
4748   int tick = INSN_TICK (next);
4749 
4750   /* If we already scheduled the insn, the modified version is
4751      correct.  */
4752   if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
4753     return;
4754 
4755   if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4756     {
4757       next_cycle_replace_deps.safe_push (dep);
4758       next_cycle_apply.safe_push (0);
4759       return;
4760     }
4761 
4762 
4763   if (DEP_TYPE (dep) == REG_DEP_CONTROL)
4764     {
4765       if (sched_verbose >= 5)
4766 	fprintf (sched_dump, "restoring pattern for insn %d\n",
4767 		 INSN_UID (next));
4768       haifa_change_pattern (next, ORIG_PAT (next));
4769     }
4770   else
4771     {
4772       struct dep_replacement *desc = DEP_REPLACE (dep);
4773       bool success;
4774 
4775       if (sched_verbose >= 5)
4776 	fprintf (sched_dump, "restoring pattern for insn %d\n",
4777 		 INSN_UID (desc->insn));
4778       tick = INSN_TICK (desc->insn);
4779 
4780       success = validate_change (desc->insn, desc->loc, desc->orig, 0);
4781       gcc_assert (success);
4782 
4783       rtx_insn *insn = DEP_PRO (dep);
4784 
4785       if (QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
4786 	{
4787 	  /* Recompute priority since dependent priorities may have changed.  */
4788 	  priority (insn, true);
4789 	}
4790 
4791       update_insn_after_change (desc->insn);
4792 
4793       if (backtrack_queue != NULL)
4794 	{
4795 	  backtrack_queue->replacement_deps.safe_push (dep);
4796 	  backtrack_queue->replace_apply.safe_push (0);
4797 	}
4798     }
4799   INSN_TICK (next) = tick;
4800   if (TODO_SPEC (next) == DEP_POSTPONED)
4801     return;
4802 
4803   if (sd_lists_empty_p (next, SD_LIST_BACK))
4804     TODO_SPEC (next) = 0;
4805   else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
4806     TODO_SPEC (next) = HARD_DEP;
4807 }
4808 
4809 /* Perform pattern replacements that were queued up until the next
4810    cycle.  */
4811 static void
4812 perform_replacements_new_cycle (void)
4813 {
4814   int i;
4815   dep_t dep;
4816   FOR_EACH_VEC_ELT (next_cycle_replace_deps, i, dep)
4817     {
4818       int apply_p = next_cycle_apply[i];
4819       if (apply_p)
4820 	apply_replacement (dep, true);
4821       else
4822 	restore_pattern (dep, true);
4823     }
4824   next_cycle_replace_deps.truncate (0);
4825   next_cycle_apply.truncate (0);
4826 }
4827 
4828 /* Compute INSN_TICK_ESTIMATE for INSN.  PROCESSED is a bitmap of
4829    instructions we've previously encountered, a set bit prevents
4830    recursion.  BUDGET is a limit on how far ahead we look; it is
4831    reduced on recursive calls.  Return true if we produced a good
4832    estimate, or false if we exceeded the budget.  */
4833 static bool
4834 estimate_insn_tick (bitmap processed, rtx_insn *insn, int budget)
4835 {
4836   sd_iterator_def sd_it;
4837   dep_t dep;
4838   int earliest = INSN_TICK (insn);
4839 
4840   FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4841     {
4842       rtx_insn *pro = DEP_PRO (dep);
4843       int t;
4844 
4845       if (DEP_STATUS (dep) & DEP_CANCELLED)
4846 	continue;
4847 
4848       if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
4849 	gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
4850       else
4851 	{
4852 	  int cost = dep_cost (dep);
4853 	  if (cost >= budget)
4854 	    return false;
4855 	  if (!bitmap_bit_p (processed, INSN_LUID (pro)))
4856 	    {
4857 	      if (!estimate_insn_tick (processed, pro, budget - cost))
4858 		return false;
4859 	    }
4860 	  gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
4861 	  t = INSN_TICK_ESTIMATE (pro) + cost;
4862 	  if (earliest == INVALID_TICK || t > earliest)
4863 	    earliest = t;
4864 	}
4865     }
4866   bitmap_set_bit (processed, INSN_LUID (insn));
4867   INSN_TICK_ESTIMATE (insn) = earliest;
4868   return true;
4869 }
4870 
4871 /* Examine the pair of insns in P, and estimate (optimistically, assuming
4872    infinite resources) the cycle in which the delayed shadow can be issued.
4873    Return the number of cycles that must pass before the real insn can be
4874    issued in order to meet this constraint.  */
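/* As a worked example with made-up numbers: if the shadow's estimated tick
   is 12, the current clock is 8 and the pair delay is 2, the real insn must
   wait 12 - (8 + 2 + 1) = 1 more cycle before it can be issued.  */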
4875 static int
4876 estimate_shadow_tick (struct delay_pair *p)
4877 {
4878   auto_bitmap processed;
4879   int t;
4880   bool cutoff;
4881 
4882   cutoff = !estimate_insn_tick (processed, p->i2,
4883 				max_insn_queue_index + pair_delay (p));
4884   if (cutoff)
4885     return max_insn_queue_index;
4886   t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
4887   if (t > 0)
4888     return t;
4889   return 0;
4890 }
4891 
4892 /* If INSN has no unresolved backwards dependencies, add it to the schedule and
4893    recursively resolve all its forward dependencies.  */
4894 static void
4895 resolve_dependencies (rtx_insn *insn)
4896 {
4897   sd_iterator_def sd_it;
4898   dep_t dep;
4899 
4900   /* Don't use sd_lists_empty_p; it ignores debug insns.  */
4901   if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (insn)) != NULL
4902       || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (insn)) != NULL)
4903     return;
4904 
4905   if (sched_verbose >= 4)
4906     fprintf (sched_dump, ";;\tquickly resolving %d\n", INSN_UID (insn));
4907 
4908   if (QUEUE_INDEX (insn) >= 0)
4909     queue_remove (insn);
4910 
4911   scheduled_insns.safe_push (insn);
4912 
4913   /* Update dependent instructions.  */
4914   for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4915        sd_iterator_cond (&sd_it, &dep);)
4916     {
4917       rtx_insn *next = DEP_CON (dep);
4918 
4919       if (sched_verbose >= 4)
4920 	fprintf (sched_dump, ";;\t\tdep %d against %d\n", INSN_UID (insn),
4921 		 INSN_UID (next));
4922 
4923       /* Resolve the dependence between INSN and NEXT.
4924 	 sd_resolve_dep () moves current dep to another list thus
4925 	 advancing the iterator.  */
4926       sd_resolve_dep (sd_it);
4927 
4928       if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4929 	{
4930 	  resolve_dependencies (next);
4931 	}
4932       else
4933 	/* Check always has only one forward dependence (to the first insn in
4934 	   the recovery block), therefore, this will be executed only once.  */
4935 	{
4936 	  gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
4937 	}
4938     }
4939 }
4940 
4941 
4942 /* Return the head and tail pointers of ebb starting at BEG and ending
4943    at END.  */
4944 void
4945 get_ebb_head_tail (basic_block beg, basic_block end,
4946 		   rtx_insn **headp, rtx_insn **tailp)
4947 {
4948   rtx_insn *beg_head = BB_HEAD (beg);
4949   rtx_insn *beg_tail = BB_END (beg);
4950   rtx_insn *end_head = BB_HEAD (end);
4951   rtx_insn *end_tail = BB_END (end);
4952 
4953   /* Don't include any notes or labels at the beginning of the BEG
4954      basic block, or notes at the end of the END basic block.  */
4955 
4956   if (LABEL_P (beg_head))
4957     beg_head = NEXT_INSN (beg_head);
4958 
4959   while (beg_head != beg_tail)
4960     if (NOTE_P (beg_head))
4961       beg_head = NEXT_INSN (beg_head);
4962     else if (DEBUG_INSN_P (beg_head))
4963       {
4964 	rtx_insn * note, *next;
4965 
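	/* The first non-note insn is a debug insn.  Move the notes that are
	   interleaved with the leading debug insns to just before BEG_HEAD,
	   so that no notes remain among the leading debug insns.  */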
4966 	for (note = NEXT_INSN (beg_head);
4967 	     note != beg_tail;
4968 	     note = next)
4969 	  {
4970 	    next = NEXT_INSN (note);
4971 	    if (NOTE_P (note))
4972 	      {
4973 		if (sched_verbose >= 9)
4974 		  fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
4975 
4976 		reorder_insns_nobb (note, note, PREV_INSN (beg_head));
4977 
4978 		if (BLOCK_FOR_INSN (note) != beg)
4979 		  df_insn_change_bb (note, beg);
4980 	      }
4981 	    else if (!DEBUG_INSN_P (note))
4982 	      break;
4983 	  }
4984 
4985 	break;
4986       }
4987     else
4988       break;
4989 
4990   *headp = beg_head;
4991 
4992   if (beg == end)
4993     end_head = beg_head;
4994   else if (LABEL_P (end_head))
4995     end_head = NEXT_INSN (end_head);
4996 
4997   while (end_head != end_tail)
4998     if (NOTE_P (end_tail))
4999       end_tail = PREV_INSN (end_tail);
5000     else if (DEBUG_INSN_P (end_tail))
5001       {
5002 	rtx_insn * note, *prev;
5003 
5004 	for (note = PREV_INSN (end_tail);
5005 	     note != end_head;
5006 	     note = prev)
5007 	  {
5008 	    prev = PREV_INSN (note);
5009 	    if (NOTE_P (note))
5010 	      {
5011 		if (sched_verbose >= 9)
5012 		  fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
5013 
5014 		reorder_insns_nobb (note, note, end_tail);
5015 
5016 		if (end_tail == BB_END (end))
5017 		  BB_END (end) = note;
5018 
5019 		if (BLOCK_FOR_INSN (note) != end)
5020 		  df_insn_change_bb (note, end);
5021 	      }
5022 	    else if (!DEBUG_INSN_P (note))
5023 	      break;
5024 	  }
5025 
5026 	break;
5027       }
5028     else
5029       break;
5030 
5031   *tailp = end_tail;
5032 }
5033 
5034 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */
5035 
5036 int
5037 no_real_insns_p (const rtx_insn *head, const rtx_insn *tail)
5038 {
5039   while (head != NEXT_INSN (tail))
5040     {
5041       if (!NOTE_P (head) && !LABEL_P (head))
5042 	return 0;
5043       head = NEXT_INSN (head);
5044     }
5045   return 1;
5046 }
5047 
5048 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
5049    previously found among the insns.  Insert them just before HEAD.  */
5050 rtx_insn *
5051 restore_other_notes (rtx_insn *head, basic_block head_bb)
5052 {
5053   if (note_list != 0)
5054     {
5055       rtx_insn *note_head = note_list;
5056 
5057       if (head)
5058 	head_bb = BLOCK_FOR_INSN (head);
5059       else
5060 	head = NEXT_INSN (bb_note (head_bb));
5061 
5062       while (PREV_INSN (note_head))
5063 	{
5064 	  set_block_for_insn (note_head, head_bb);
5065 	  note_head = PREV_INSN (note_head);
5066 	}
5067       /* The loop above misses this first note; set its block here.  */
5068       set_block_for_insn (note_head, head_bb);
5069 
5070       SET_PREV_INSN (note_head) = PREV_INSN (head);
5071       SET_NEXT_INSN (PREV_INSN (head)) = note_head;
5072       SET_PREV_INSN (head) = note_list;
5073       SET_NEXT_INSN (note_list) = head;
5074 
5075       if (BLOCK_FOR_INSN (head) != head_bb)
5076 	BB_END (head_bb) = note_list;
5077 
5078       head = note_head;
5079     }
5080 
5081   return head;
5082 }
5083 
5084 /* When we know we are going to discard the schedule due to a failed attempt
5085    at modulo scheduling, undo all replacements.  */
5086 static void
5087 undo_all_replacements (void)
5088 {
5089   rtx_insn *insn;
5090   int i;
5091 
5092   FOR_EACH_VEC_ELT (scheduled_insns, i, insn)
5093     {
5094       sd_iterator_def sd_it;
5095       dep_t dep;
5096 
5097       /* See if we must undo a replacement.  */
5098       for (sd_it = sd_iterator_start (insn, SD_LIST_RES_FORW);
5099 	   sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
5100 	{
5101 	  struct dep_replacement *desc = DEP_REPLACE (dep);
5102 	  if (desc != NULL)
5103 	    validate_change (desc->insn, desc->loc, desc->orig, 0);
5104 	}
5105     }
5106 }
5107 
5108 /* Return first non-scheduled insn in the current scheduling block.
5109    This is mostly used for debug-counter purposes.  */
5110 static rtx_insn *
5111 first_nonscheduled_insn (void)
5112 {
5113   rtx_insn *insn = (nonscheduled_insns_begin != NULL_RTX
5114 		    ? nonscheduled_insns_begin
5115 		    : current_sched_info->prev_head);
5116 
5117   do
5118     {
5119       insn = next_nonnote_nondebug_insn (insn);
5120     }
5121   while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);
5122 
5123   return insn;
5124 }
5125 
5126 /* Move insns that became ready to fire from queue to ready list.  */
5127 
5128 static void
5129 queue_to_ready (struct ready_list *ready)
5130 {
5131   rtx_insn *insn;
5132   rtx_insn_list *link;
5133   rtx_insn *skip_insn;
5134 
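  /* Advance the cyclic queue pointer by one cycle; insn_queue[q_ptr] now
     holds the insns whose stall time has just expired.  */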
5135   q_ptr = NEXT_Q (q_ptr);
5136 
5137   if (dbg_cnt (sched_insn) == false)
5138     /* If debug counter is activated do not requeue the first
5139        nonscheduled insn.  */
5140     skip_insn = first_nonscheduled_insn ();
5141   else
5142     skip_insn = NULL;
5143 
5144   /* Add all pending insns that can be scheduled without stalls to the
5145      ready list.  */
5146   for (link = insn_queue[q_ptr]; link; link = link->next ())
5147     {
5148       insn = link->insn ();
5149       q_size -= 1;
5150 
5151       if (sched_verbose >= 2)
5152 	fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
5153 		 (*current_sched_info->print_insn) (insn, 0));
5154 
5155       /* If the ready list is full, delay the insn for 1 cycle.
5156 	 See the comment in schedule_block for the rationale.  */
5157       if (!reload_completed
5158 	  && (ready->n_ready - ready->n_debug > param_max_sched_ready_insns
5159 	      || (sched_pressure == SCHED_PRESSURE_MODEL
5160 		  /* Limit pressure recalculations to
5161 		     param_max_sched_ready_insns instructions too.  */
5162 		  && model_index (insn) > (model_curr_point
5163 					   + param_max_sched_ready_insns)))
5164 	  && !(sched_pressure == SCHED_PRESSURE_MODEL
5165 	       && model_curr_point < model_num_insns
5166 	       /* Always allow the next model instruction to issue.  */
5167 	       && model_index (insn) == model_curr_point)
5168 	  && !SCHED_GROUP_P (insn)
5169 	  && insn != skip_insn)
5170 	{
5171 	  if (sched_verbose >= 2)
5172 	    fprintf (sched_dump, "keeping in queue, ready full\n");
5173 	  queue_insn (insn, 1, "ready full");
5174 	}
5175       else
5176 	{
5177 	  ready_add (ready, insn, false);
5178 	  if (sched_verbose >= 2)
5179 	    fprintf (sched_dump, "moving to ready without stalls\n");
5180         }
5181     }
5182   free_INSN_LIST_list (&insn_queue[q_ptr]);
5183 
5184   /* If there are no ready insns, stall until one is ready and add all
5185      of the pending insns at that point to the ready list.  */
5186   if (ready->n_ready == 0)
5187     {
5188       int stalls;
5189 
5190       for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
5191 	{
5192 	  if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5193 	    {
5194 	      for (; link; link = link->next ())
5195 		{
5196 		  insn = link->insn ();
5197 		  q_size -= 1;
5198 
5199 		  if (sched_verbose >= 2)
5200 		    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
5201 			     (*current_sched_info->print_insn) (insn, 0));
5202 
5203 		  ready_add (ready, insn, false);
5204 		  if (sched_verbose >= 2)
5205 		    fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
5206 		}
5207 	      free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
5208 
5209 	      advance_one_cycle ();
5210 
5211 	      break;
5212 	    }
5213 
5214 	  advance_one_cycle ();
5215 	}
5216 
5217       q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
5218       clock_var += stalls;
5219       if (sched_verbose >= 2)
5220 	fprintf (sched_dump, ";;\tAdvancing clock by %d cycle[s] to %d\n",
5221 		 stalls, clock_var);
5222     }
5223 }
5224 
5225 /* Used by early_queue_to_ready.  Determines whether it is "ok" to
5226    prematurely move INSN from the queue to the ready list.  Currently,
5227    if a target defines the hook 'is_costly_dependence', this function
5228    uses the hook to check whether there exist any dependences which are
5229    considered costly by the target, between INSN and other insns that
5230    have already been scheduled.  Dependences are checked up to Y cycles
5231    back, with a default of Y=1; the flag -fsched-stalled-insns-dep=Y allows
5232    controlling this value.
5233    (Other considerations could be taken into account instead (or in
5234    addition) depending on user flags and target hooks.)  */
5235 
5236 static bool
5237 ok_for_early_queue_removal (rtx_insn *insn)
5238 {
5239   if (targetm.sched.is_costly_dependence)
5240     {
5241       int n_cycles;
5242       int i = scheduled_insns.length ();
5243       for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
5244 	{
5245 	  while (i-- > 0)
5246 	    {
5247 	      int cost;
5248 
5249 	      rtx_insn *prev_insn = scheduled_insns[i];
5250 
5251 	      if (!NOTE_P (prev_insn))
5252 		{
5253 		  dep_t dep;
5254 
5255 		  dep = sd_find_dep_between (prev_insn, insn, true);
5256 
5257 		  if (dep != NULL)
5258 		    {
5259 		      cost = dep_cost (dep);
5260 
5261 		      if (targetm.sched.is_costly_dependence (dep, cost,
5262 				flag_sched_stalled_insns_dep - n_cycles))
5263 			return false;
5264 		    }
5265 		}
5266 
5267 	      if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
5268 		break;
5269 	    }
5270 
5271 	  if (i == 0)
5272 	    break;
5273 	}
5274     }
5275 
5276   return true;
5277 }
5278 
5279 
5280 /* Remove insns from the queue, before they become "ready" with respect
5281    to FU latency considerations.  */
5282 
5283 static int
5284 early_queue_to_ready (state_t state, struct ready_list *ready)
5285 {
5286   rtx_insn *insn;
5287   rtx_insn_list *link;
5288   rtx_insn_list *next_link;
5289   rtx_insn_list *prev_link;
5290   bool move_to_ready;
5291   int cost;
5292   state_t temp_state = alloca (dfa_state_size);
5293   int stalls;
5294   int insns_removed = 0;
5295 
5296   /*
5297      Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
5298      function:
5299 
5300      X == 0: There is no limit on how many queued insns can be removed
5301              prematurely.  (flag_sched_stalled_insns = -1).
5302 
5303      X >= 1: Only X queued insns can be removed prematurely in each
5304 	     invocation.  (flag_sched_stalled_insns = X).
5305 
5306      Otherwise: Early queue removal is disabled.
5307          (flag_sched_stalled_insns = 0)
5308   */
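  /* For example, -fsched-stalled-insns=2 -fsched-stalled-insns-dep=3 allows
     at most two insns to be moved early per invocation, after checking
     dependences against up to three cycles' worth of previously scheduled
     insns.  */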
5309 
5310   if (! flag_sched_stalled_insns)
5311     return 0;
5312 
5313   for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
5314     {
5315       if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5316 	{
5317 	  if (sched_verbose > 6)
5318 	    fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
5319 
5320 	  prev_link = 0;
5321 	  while (link)
5322 	    {
5323 	      next_link = link->next ();
5324 	      insn = link->insn ();
5325 	      if (insn && sched_verbose > 6)
5326 		print_rtl_single (sched_dump, insn);
5327 
5328 	      memcpy (temp_state, state, dfa_state_size);
5329 	      if (recog_memoized (insn) < 0)
5330 		/* Use a non-negative cost to mark the insn as not ready,
5331 		   avoiding an infinite Q->R->Q->R... cycle.  */
5332 		cost = 0;
5333 	      else
5334 		cost = state_transition (temp_state, insn);
5335 
5336 	      if (sched_verbose >= 6)
5337 		fprintf (sched_dump, "transition cost = %d\n", cost);
5338 
5339 	      move_to_ready = false;
5340 	      if (cost < 0)
5341 		{
5342 		  move_to_ready = ok_for_early_queue_removal (insn);
5343 		  if (move_to_ready == true)
5344 		    {
5345 		      /* move from Q to R */
5346 		      q_size -= 1;
5347 		      ready_add (ready, insn, false);
5348 
5349 		      if (prev_link)
5350 			XEXP (prev_link, 1) = next_link;
5351 		      else
5352 			insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
5353 
5354 		      free_INSN_LIST_node (link);
5355 
5356 		      if (sched_verbose >= 2)
5357 			fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
5358 				 (*current_sched_info->print_insn) (insn, 0));
5359 
5360 		      insns_removed++;
5361 		      if (insns_removed == flag_sched_stalled_insns)
5362 			/* Remove no more than flag_sched_stalled_insns insns
5363 			   from Q at a time.  */
5364 			return insns_removed;
5365 		    }
5366 		}
5367 
5368 	      if (move_to_ready == false)
5369 		prev_link = link;
5370 
5371 	      link = next_link;
5372 	    } /* while link */
5373 	} /* if link */
5374 
5375     } /* for stalls.. */
5376 
5377   return insns_removed;
5378 }
5379 
5380 
5381 /* Print the ready list for debugging purposes.
5382    If READY_TRY is non-zero then only print insns that max_issue
5383    will consider.  */
5384 static void
5385 debug_ready_list_1 (struct ready_list *ready, signed char *ready_try)
5386 {
5387   rtx_insn **p;
5388   int i;
5389 
5390   if (ready->n_ready == 0)
5391     {
5392       fprintf (sched_dump, "\n");
5393       return;
5394     }
5395 
5396   p = ready_lastpos (ready);
5397   for (i = 0; i < ready->n_ready; i++)
5398     {
5399       if (ready_try != NULL && ready_try[ready->n_ready - i - 1])
5400 	continue;
5401 
5402       fprintf (sched_dump, "  %s:%d",
5403 	       (*current_sched_info->print_insn) (p[i], 0),
5404 	       INSN_LUID (p[i]));
5405       if (sched_pressure != SCHED_PRESSURE_NONE)
5406 	fprintf (sched_dump, "(cost=%d",
5407 		 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
5408       fprintf (sched_dump, ":prio=%d", INSN_PRIORITY (p[i]));
5409       if (INSN_TICK (p[i]) > clock_var)
5410 	fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
5411       if (sched_pressure == SCHED_PRESSURE_MODEL)
5412 	fprintf (sched_dump, ":idx=%d",
5413 		 model_index (p[i]));
5414       if (sched_pressure != SCHED_PRESSURE_NONE)
5415 	fprintf (sched_dump, ")");
5416     }
5417   fprintf (sched_dump, "\n");
5418 }
5419 
5420 /* Print the ready list.  Callable from debugger.  */
5421 static void
5422 debug_ready_list (struct ready_list *ready)
5423 {
5424   debug_ready_list_1 (ready, NULL);
5425 }
5426 
5427 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
5428    NOTEs.  This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
5429    replaces the epilogue note in the correct basic block.  */
5430 void
5431 reemit_notes (rtx_insn *insn)
5432 {
5433   rtx note;
5434   rtx_insn *last = insn;
5435 
5436   for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
5437     {
5438       if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
5439 	{
5440 	  enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
5441 
5442 	  last = emit_note_before (note_type, last);
5443 	  remove_note (insn, note);
5444 	  df_insn_create_insn_record (last);
5445 	}
5446     }
5447 }
5448 
5449 /* Move INSN.  Reemit notes if needed.  Update CFG, if needed.  */
5450 static void
5451 move_insn (rtx_insn *insn, rtx_insn *last, rtx nt)
5452 {
5453   if (PREV_INSN (insn) != last)
5454     {
5455       basic_block bb;
5456       rtx_insn *note;
5457       int jump_p = 0;
5458 
5459       bb = BLOCK_FOR_INSN (insn);
5460 
5461       /* BB_HEAD is either LABEL or NOTE.  */
5462       gcc_assert (BB_HEAD (bb) != insn);
5463 
5464       if (BB_END (bb) == insn)
5465 	/* If this is last instruction in BB, move end marker one
5466 	   instruction up.  */
5467 	{
5468 	  /* Jumps are always placed at the end of basic block.  */
5469 	  jump_p = control_flow_insn_p (insn);
5470 
5471 	  gcc_assert (!jump_p
5472 		      || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
5473 			  && IS_SPECULATION_BRANCHY_CHECK_P (insn))
5474 		      || (common_sched_info->sched_pass_id
5475 			  == SCHED_EBB_PASS));
5476 
5477 	  gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
5478 
5479 	  BB_END (bb) = PREV_INSN (insn);
5480 	}
5481 
5482       gcc_assert (BB_END (bb) != last);
5483 
5484       if (jump_p)
5485 	/* We move the block note along with jump.  */
5486 	{
5487 	  gcc_assert (nt);
5488 
5489 	  note = NEXT_INSN (insn);
5490 	  while (NOTE_NOT_BB_P (note) && note != nt)
5491 	    note = NEXT_INSN (note);
5492 
5493 	  if (note != nt
5494 	      && (LABEL_P (note)
5495 		  || BARRIER_P (note)))
5496 	    note = NEXT_INSN (note);
5497 
5498 	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5499 	}
5500       else
5501 	note = insn;
5502 
5503       SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
5504       SET_PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
5505 
5506       SET_NEXT_INSN (note) = NEXT_INSN (last);
5507       SET_PREV_INSN (NEXT_INSN (last)) = note;
5508 
5509       SET_NEXT_INSN (last) = insn;
5510       SET_PREV_INSN (insn) = last;
5511 
5512       bb = BLOCK_FOR_INSN (last);
5513 
5514       if (jump_p)
5515 	{
5516 	  fix_jump_move (insn);
5517 
5518 	  if (BLOCK_FOR_INSN (insn) != bb)
5519 	    move_block_after_check (insn);
5520 
5521 	  gcc_assert (BB_END (bb) == last);
5522 	}
5523 
5524       df_insn_change_bb (insn, bb);
5525 
5526       /* Update BB_END, if needed.  */
5527       if (BB_END (bb) == last)
5528 	BB_END (bb) = insn;
5529     }
5530 
5531   SCHED_GROUP_P (insn) = 0;
5532 }
5533 
5534 /* Return true if scheduling INSN will finish current clock cycle.  */
5535 static bool
5536 insn_finishes_cycle_p (rtx_insn *insn)
5537 {
5538   if (SCHED_GROUP_P (insn))
5539     /* After issuing INSN, rest of the sched_group will be forced to issue
5540        in order.  Don't make any plans for the rest of cycle.  */
5541     return true;
5542 
5543   /* Finishing the block will, apparently, finish the cycle.  */
5544   if (current_sched_info->insn_finishes_block_p
5545       && current_sched_info->insn_finishes_block_p (insn))
5546     return true;
5547 
5548   return false;
5549 }
5550 
5551 /* Helper for autopref_multipass_init.  Given a SET in PAT and whether
5552    we're expecting a memory WRITE or not, check that the insn is relevant to
5553    the autoprefetcher modelling code.  Return true iff that is the case.
5554    If it is relevant, record the base register of the memory op in BASE and
5555    the offset in OFFSET.  */
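/* For instance (illustrative RTL), a load such as
     (set (reg:SI r0) (mem:SI (plus:SI (reg:SI r1) (const_int 8))))
   is relevant when WRITE is false and yields BASE == r1, OFFSET == 8;
   addresses that are not of the (base + const) form are rejected.  */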
5556 
5557 static bool
5558 analyze_set_insn_for_autopref (rtx pat, bool write, rtx *base, int *offset)
5559 {
5560   if (GET_CODE (pat) != SET)
5561     return false;
5562 
5563   rtx mem = write ? SET_DEST (pat) : SET_SRC (pat);
5564   if (!MEM_P (mem))
5565     return false;
5566 
5567   struct address_info info;
5568   decompose_mem_address (&info, mem);
5569 
5570   /* TODO: Currently only (base+const) addressing is supported.  */
5571   if (info.base == NULL || !REG_P (*info.base)
5572       || (info.disp != NULL && !CONST_INT_P (*info.disp)))
5573     return false;
5574 
5575   *base = *info.base;
5576   *offset = info.disp ? INTVAL (*info.disp) : 0;
5577   return true;
5578 }
5579 
5580 /* Functions to model cache auto-prefetcher.
5581 
5582    Some CPUs have a cache auto-prefetcher, which /seems/ to initiate
5583    memory prefetches if it sees instructions with consecutive memory accesses
5584    in the instruction stream.  Details of such hardware units are not published,
5585    so we can only guess what exactly is going on there.
5586    In the scheduler, we model an abstract auto-prefetcher.  If there are memory
5587    insns in the ready list (or the queue) that have the same memory base, but
5588    different offsets, then we delay the insns with larger offsets until insns
5589    with smaller offsets get scheduled.  If PARAM_SCHED_AUTOPREF_QUEUE_DEPTH
5590    is "1", then we look at the ready list; if it is N>1, then we also look
5591    through N-1 queue entries.
5592    If the param is N>=0, then rank_for_schedule will consider auto-prefetching
5593    among its heuristics.
5594    A param value of "-1" disables modelling of the auto-prefetcher.  */
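/* As a concrete illustration of the model: if the ready list contains loads
   from [r1 + 0], [r1 + 8] and [r1 + 4] (hypothetical insns with the same
   base), the heuristics below prefer to issue them in offset order 0, 4, 8,
   delaying the accesses with larger offsets.  */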
5595 
5596 /* Initialize autoprefetcher model data for INSN.  */
5597 static void
5598 autopref_multipass_init (const rtx_insn *insn, int write)
5599 {
5600   autopref_multipass_data_t data = &INSN_AUTOPREF_MULTIPASS_DATA (insn)[write];
5601 
5602   gcc_assert (data->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED);
5603   data->base = NULL_RTX;
5604   data->offset = 0;
5605   /* Set insn entry initialized, but not relevant for auto-prefetcher.  */
5606   data->status = AUTOPREF_MULTIPASS_DATA_IRRELEVANT;
5607 
5608   rtx pat = PATTERN (insn);
5609 
5610   /* We have a multi-set insn like a load-multiple or store-multiple.
5611      We care about these as long as all the memory ops inside the PARALLEL
5612      have the same base register.  We care about the minimum and maximum
5613      offsets from that base but don't check for the order of those offsets
5614      within the PARALLEL insn itself.  */
5615   if (GET_CODE (pat) == PARALLEL)
5616     {
5617       int n_elems = XVECLEN (pat, 0);
5618 
5619       int i, offset;
5620       rtx base, prev_base = NULL_RTX;
5621       int min_offset = INT_MAX;
5622 
5623       for (i = 0; i < n_elems; i++)
5624 	{
5625 	  rtx set = XVECEXP (pat, 0, i);
5626 	  if (GET_CODE (set) != SET)
5627 	    return;
5628 
5629 	  if (!analyze_set_insn_for_autopref (set, write, &base, &offset))
5630 	    return;
5631 
5632 	  /* Ensure that all memory operations in the PARALLEL use the same
5633 	     base register.  */
5634 	  if (i > 0 && REGNO (base) != REGNO (prev_base))
5635 	    return;
5636 	  prev_base = base;
5637 	  min_offset = MIN (min_offset, offset);
5638 	}
5639 
5640       /* If we reached here then we have a valid PARALLEL of multiple memory ops
5641 	 with prev_base as the base and min_offset containing the offset.  */
5642       gcc_assert (prev_base);
5643       data->base = prev_base;
5644       data->offset = min_offset;
5645       data->status = AUTOPREF_MULTIPASS_DATA_NORMAL;
5646       return;
5647     }
5648 
5649   /* Otherwise this is a single set memory operation.  */
5650   rtx set = single_set (insn);
5651   if (set == NULL_RTX)
5652     return;
5653 
5654   if (!analyze_set_insn_for_autopref (set, write, &data->base,
5655 				       &data->offset))
5656     return;
5657 
5658   /* This insn is relevant for the auto-prefetcher.
5659      The base and offset fields will have been filled in the
5660      analyze_set_insn_for_autopref call above.  */
5661   data->status = AUTOPREF_MULTIPASS_DATA_NORMAL;
5662 }
5663 
5664 /* Helper function for rank_for_schedule sorting.  */
5665 static int
5666 autopref_rank_for_schedule (const rtx_insn *insn1, const rtx_insn *insn2)
5667 {
5668   int r = 0;
5669   for (int write = 0; write < 2 && !r; ++write)
5670     {
5671       autopref_multipass_data_t data1
5672 	= &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5673       autopref_multipass_data_t data2
5674 	= &INSN_AUTOPREF_MULTIPASS_DATA (insn2)[write];
5675 
5676       if (data1->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5677 	autopref_multipass_init (insn1, write);
5678 
5679       if (data2->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5680 	autopref_multipass_init (insn2, write);
5681 
5682       int irrel1 = data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT;
5683       int irrel2 = data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT;
5684 
5685       if (!irrel1 && !irrel2)
5686 	r = data1->offset - data2->offset;
5687       else
5688 	r = irrel2 - irrel1;
5689     }
5690 
5691   return r;
5692 }
5693 
5694 /* True if header of debug dump was printed.  */
5695 static bool autopref_multipass_dfa_lookahead_guard_started_dump_p;
5696 
5697 /* Helper for autopref_multipass_dfa_lookahead_guard.
5698    Return "1" if INSN1 should be delayed in favor of INSN2.  */
5699 static int
5700 autopref_multipass_dfa_lookahead_guard_1 (const rtx_insn *insn1,
5701 					  const rtx_insn *insn2, int write)
5702 {
5703   autopref_multipass_data_t data1
5704     = &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5705   autopref_multipass_data_t data2
5706     = &INSN_AUTOPREF_MULTIPASS_DATA (insn2)[write];
5707 
5708   if (data2->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5709     autopref_multipass_init (insn2, write);
5710   if (data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5711     return 0;
5712 
5713   if (rtx_equal_p (data1->base, data2->base)
5714       && data1->offset > data2->offset)
5715     {
5716       if (sched_verbose >= 2)
5717 	{
5718           if (!autopref_multipass_dfa_lookahead_guard_started_dump_p)
5719 	    {
5720 	      fprintf (sched_dump,
5721 		       ";;\t\tnot trying in max_issue due to autoprefetch "
5722 		       "model: ");
5723 	      autopref_multipass_dfa_lookahead_guard_started_dump_p = true;
5724 	    }
5725 
5726 	  fprintf (sched_dump, " %d(%d)", INSN_UID (insn1), INSN_UID (insn2));
5727 	}
5728 
5729       return 1;
5730     }
5731 
5732   return 0;
5733 }
5734 
5735 /* General note:
5736 
5737    We could have also hooked autoprefetcher model into
5738    first_cycle_multipass_backtrack / first_cycle_multipass_issue hooks
5739    to enable intelligent selection of "[r1+0]=r2; [r1+4]=r3" on the same cycle
5740    (e.g., once "[r1+0]=r2" is issued in max_issue(), "[r1+4]=r3" gets
5741    unblocked).  We don't bother about this yet because target of interest
5742    (ARM Cortex-A15) can issue only 1 memory operation per cycle.  */
5743 
5744 /* Implementation of first_cycle_multipass_dfa_lookahead_guard hook.
5745    Return "1" if INSN1 should not be considered in max_issue due to
5746    auto-prefetcher considerations, or -N to queue INSN1 for N cycles.  */
5747 int
5748 autopref_multipass_dfa_lookahead_guard (rtx_insn *insn1, int ready_index)
5749 {
5750   int r = 0;
5751 
5752   /* Exit early if the param forbids this or if we're not entering here through
5753      normal haifa scheduling.  This can happen if selective scheduling is
5754      explicitly enabled.  */
5755   if (!insn_queue || param_sched_autopref_queue_depth <= 0)
5756     return 0;
5757 
5758   if (sched_verbose >= 2 && ready_index == 0)
5759     autopref_multipass_dfa_lookahead_guard_started_dump_p = false;
5760 
5761   for (int write = 0; write < 2; ++write)
5762     {
5763       autopref_multipass_data_t data1
5764 	= &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5765 
5766       if (data1->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5767 	autopref_multipass_init (insn1, write);
5768       if (data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5769 	continue;
5770 
5771       if (ready_index == 0
5772 	  && data1->status == AUTOPREF_MULTIPASS_DATA_DONT_DELAY)
5773 	/* We allow only a single delay on privileged instructions.
5774 	   Doing otherwise would cause an infinite loop.  */
5775 	{
5776 	  if (sched_verbose >= 2)
5777 	    {
5778 	      if (!autopref_multipass_dfa_lookahead_guard_started_dump_p)
5779 		{
5780 		  fprintf (sched_dump,
5781 			   ";;\t\tnot trying in max_issue due to autoprefetch "
5782 			   "model: ");
5783 		  autopref_multipass_dfa_lookahead_guard_started_dump_p = true;
5784 		}
5785 
5786 	      fprintf (sched_dump, " *%d*", INSN_UID (insn1));
5787 	    }
5788 	  continue;
5789 	}
5790 
5791       for (int i2 = 0; i2 < ready.n_ready; ++i2)
5792 	{
5793 	  rtx_insn *insn2 = get_ready_element (i2);
5794 	  if (insn1 == insn2)
5795 	    continue;
5796 	  r = autopref_multipass_dfa_lookahead_guard_1 (insn1, insn2, write);
5797 	  if (r)
5798 	    {
5799 	      if (ready_index == 0)
5800 		{
5801 		  r = -1;
5802 		  data1->status = AUTOPREF_MULTIPASS_DATA_DONT_DELAY;
5803 		}
5804 	      goto finish;
5805 	    }
5806 	}
5807 
5808       if (param_sched_autopref_queue_depth == 1)
5809 	continue;
5810 
5811       /* Everything from the current queue slot should have been moved to
5812 	 the ready list.  */
5813       gcc_assert (insn_queue[NEXT_Q_AFTER (q_ptr, 0)] == NULL_RTX);
5814 
5815       int n_stalls = param_sched_autopref_queue_depth - 1;
5816       if (n_stalls > max_insn_queue_index)
5817 	n_stalls = max_insn_queue_index;
5818 
5819       for (int stalls = 1; stalls <= n_stalls; ++stalls)
5820 	{
5821 	  for (rtx_insn_list *link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)];
5822 	       link != NULL_RTX;
5823 	       link = link->next ())
5824 	    {
5825 	      rtx_insn *insn2 = link->insn ();
5826 	      r = autopref_multipass_dfa_lookahead_guard_1 (insn1, insn2,
5827 							    write);
5828 	      if (r)
5829 		{
5830 		  /* Queue INSN1 until INSN2 can issue.  */
5831 		  r = -stalls;
5832 		  if (ready_index == 0)
5833 		    data1->status = AUTOPREF_MULTIPASS_DATA_DONT_DELAY;
5834 		  goto finish;
5835 		}
5836 	    }
5837 	}
5838     }
5839 
5840     finish:
5841   if (sched_verbose >= 2
5842       && autopref_multipass_dfa_lookahead_guard_started_dump_p
5843       && (ready_index == ready.n_ready - 1 || r < 0))
5844     /* This does not /always/ trigger.  We don't output EOL if the last
5845        insn is not recognized (INSN_CODE < 0) and lookahead_guard is not
5846        called.  We can live with this.  */
5847     fprintf (sched_dump, "\n");
5848 
5849   return r;
5850 }
5851 
5852 /* Define type for target data used in multipass scheduling.  */
5853 #ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
5854 # define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
5855 #endif
5856 typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;
5857 
5858 /* The following structure describes an entry of the stack of choices.  */
5859 struct choice_entry
5860 {
5861   /* Ordinal number of the issued insn in the ready queue.  */
5862   int index;
5863   /* The number of remaining insns whose issue we should still try.  */
5864   int rest;
5865   /* The number of issued essential insns.  */
5866   int n;
5867   /* State after issuing the insn.  */
5868   state_t state;
5869   /* Target-specific data.  */
5870   first_cycle_multipass_data_t target_data;
5871 };
5872 
5873 /* The following array is used to implement a stack of choices used in
5874    function max_issue.  */
5875 static struct choice_entry *choice_stack;
5876 
5877 /* This holds the value of the target dfa_lookahead hook.  */
5878 int dfa_lookahead;
5879 
5880 /* The following variable holds the maximal number of tries of issuing
5881    insns for the first cycle multipass insn scheduling.  We define
5882    this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
5883    need this constraint if all real insns (with non-negative codes)
5884    had reservations, because in that case the algorithm complexity is
5885    O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
5886    might be incomplete and such insns might occur.  For such
5887    descriptions, the complexity of the algorithm (without the constraint)
5888    could reach DFA_LOOKAHEAD ** N, where N is the queue length.  */
5889 static int max_lookahead_tries;
5890 
5891 /* The following function returns the maximal (or close to maximal) number
5892    of insns which can be issued on the same cycle, one of which is the
5893    insn with the best rank (the first insn in READY).  To do this, the
5894    function tries different samples of ready insns.  READY is the
5895    current queue `ready'.  The global array READY_TRY reflects which
5896    insns are already issued in this try.  The function stops immediately
5897    once it reaches a solution in which all instructions can be issued.
5898    INDEX will contain the index of the best insn in READY.  This
5899    function is used only for first cycle multipass scheduling.
5900 
5901    PRIVILEGED_N >= 0
5902 
5903    This function expects recognized insns only.  All USEs,
5904    CLOBBERs, etc must be filtered elsewhere.  */
5905 int
5906 max_issue (struct ready_list *ready, int privileged_n, state_t state,
5907 	   bool first_cycle_insn_p, int *index)
5908 {
5909   int n, i, all, n_ready, best, delay, tries_num;
5910   int more_issue;
5911   struct choice_entry *top;
5912   rtx_insn *insn;
5913 
5914   if (sched_fusion)
5915     return 0;
5916 
5917   n_ready = ready->n_ready;
5918   gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
5919 	      && privileged_n <= n_ready);
5920 
5921   /* Init MAX_LOOKAHEAD_TRIES.  */
5922   if (max_lookahead_tries == 0)
5923     {
5924       max_lookahead_tries = 100;
5925       for (i = 0; i < issue_rate; i++)
5926 	max_lookahead_tries *= dfa_lookahead;
5927     }
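  /* E.g. with dfa_lookahead == 4 and issue_rate == 2 this allows
     100 * 4 * 4 = 1600 tries.  */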
5928 
5929   /* Compute how many more insns can be issued on this cycle.  */
5930   more_issue = issue_rate - cycle_issued_insns;
5931   gcc_assert (more_issue >= 0);
5932 
5933   /* The number of the issued insns in the best solution.  */
5934   best = 0;
5935 
5936   top = choice_stack;
5937 
5938   /* Set initial state of the search.  */
5939   memcpy (top->state, state, dfa_state_size);
5940   top->rest = dfa_lookahead;
5941   top->n = 0;
5942   if (targetm.sched.first_cycle_multipass_begin)
5943     targetm.sched.first_cycle_multipass_begin (&top->target_data,
5944 					       ready_try, n_ready,
5945 					       first_cycle_insn_p);
5946 
5947   /* Count the number of the insns to search among.  */
5948   for (all = i = 0; i < n_ready; i++)
5949     if (!ready_try [i])
5950       all++;
5951 
5952   if (sched_verbose >= 2)
5953     {
5954       fprintf (sched_dump, ";;\t\tmax_issue among %d insns:", all);
5955       debug_ready_list_1 (ready, ready_try);
5956     }
5957 
5958   /* I is the index of the insn to try next.  */
5959   i = 0;
5960   tries_num = 0;
5961   for (;;)
5962     {
5963       if (/* If we've reached a dead end or searched enough of what we have
5964 	     been asked...  */
5965 	  top->rest == 0
5966 	  /* or have nothing else to try...  */
5967 	  || i >= n_ready
5968 	  /* or should not issue more.  */
5969 	  || top->n >= more_issue)
5970 	{
5971 	  /* ??? (... || i == n_ready).  */
5972 	  gcc_assert (i <= n_ready);
5973 
5974 	  /* We should not issue more than issue_rate instructions.  */
5975 	  gcc_assert (top->n <= more_issue);
5976 
5977 	  if (top == choice_stack)
5978 	    break;
5979 
5980 	  if (best < top - choice_stack)
5981 	    {
5982 	      if (privileged_n)
5983 		{
5984 		  n = privileged_n;
5985 		  /* Try to find issued privileged insn.  */
5986 		  while (n && !ready_try[--n])
5987 		    ;
5988 		}
5989 
5990 	      if (/* If all insns are equally good...  */
5991 		  privileged_n == 0
5992 		  /* Or a privileged insn will be issued.  */
5993 		  || ready_try[n])
5994 		/* Then we have a solution.  */
5995 		{
5996 		  best = top - choice_stack;
5997 		  /* This is the index of the insn issued first in this
5998 		     solution.  */
5999 		  *index = choice_stack [1].index;
6000 		  if (top->n == more_issue || best == all)
6001 		    break;
6002 		}
6003 	    }
6004 
6005 	  /* Set ready-list index to point to the last insn
6006 	     ('i++' below will advance it to the next insn).  */
6007 	  i = top->index;
6008 
6009 	  /* Backtrack.  */
6010 	  ready_try [i] = 0;
6011 
6012 	  if (targetm.sched.first_cycle_multipass_backtrack)
6013 	    targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
6014 							   ready_try, n_ready);
6015 
6016 	  top--;
6017 	  memcpy (state, top->state, dfa_state_size);
6018 	}
6019       else if (!ready_try [i])
6020 	{
6021 	  tries_num++;
6022 	  if (tries_num > max_lookahead_tries)
6023 	    break;
6024 	  insn = ready_element (ready, i);
6025 	  delay = state_transition (state, insn);
6026 	  if (delay < 0)
6027 	    {
6028 	      if (state_dead_lock_p (state)
6029 		  || insn_finishes_cycle_p (insn))
6030 		/* We won't issue any more instructions in the next
6031 		   choice_state.  */
6032 		top->rest = 0;
6033 	      else
6034 		top->rest--;
6035 
6036 	      n = top->n;
6037 	      if (memcmp (top->state, state, dfa_state_size) != 0)
6038 		n++;
6039 
6040 	      /* Advance to the next choice_entry.  */
6041 	      top++;
6042 	      /* Initialize it.  */
6043 	      top->rest = dfa_lookahead;
6044 	      top->index = i;
6045 	      top->n = n;
6046 	      memcpy (top->state, state, dfa_state_size);
6047 	      ready_try [i] = 1;
6048 
6049 	      if (targetm.sched.first_cycle_multipass_issue)
6050 		targetm.sched.first_cycle_multipass_issue (&top->target_data,
6051 							   ready_try, n_ready,
6052 							   insn,
6053 							   &((top - 1)
6054 							     ->target_data));
6055 
6056 	      i = -1;
6057 	    }
6058 	}
6059 
6060       /* Increase ready-list index.  */
6061       i++;
6062     }
6063 
6064   if (targetm.sched.first_cycle_multipass_end)
6065     targetm.sched.first_cycle_multipass_end (best != 0
6066 					     ? &choice_stack[1].target_data
6067 					     : NULL);
6068 
6069   /* Restore the original state of the DFA.  */
6070   memcpy (state, choice_stack->state, dfa_state_size);
6071 
6072   return best;
6073 }
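/* For reference, choose_ready below calls this function as
   max_issue (ready, 1, curr_state, first_cycle_insn_p, &index), i.e. with
   only the first (highest-ranked) ready insn privileged.  */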
6074 
6075 /* The following function chooses an insn from READY and modifies
6076    READY.  It is used only for first
6077    cycle multipass scheduling.
6078    Return:
6079    -1 if cycle should be advanced,
6080    0 if INSN_PTR is set to point to the desirable insn,
6081    1 if choose_ready () should be restarted without advancing the cycle.  */
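/* The restart case (return value 1) is triggered only when a target's
   first_cycle_multipass_dfa_lookahead_guard hook asks for an insn to be
   requeued; see the ready_try[i] < 0 handling below.  */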
6082 static int
6083 choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
6084 	      rtx_insn **insn_ptr)
6085 {
6086   if (dbg_cnt (sched_insn) == false)
6087     {
6088       if (nonscheduled_insns_begin == NULL_RTX)
6089 	nonscheduled_insns_begin = current_sched_info->prev_head;
6090 
6091       rtx_insn *insn = first_nonscheduled_insn ();
6092 
6093       if (QUEUE_INDEX (insn) == QUEUE_READY)
6094 	/* INSN is in the ready_list.  */
6095 	{
6096 	  ready_remove_insn (insn);
6097 	  *insn_ptr = insn;
6098 	  return 0;
6099 	}
6100 
6101       /* INSN is in the queue.  Advance cycle to move it to the ready list.  */
6102       gcc_assert (QUEUE_INDEX (insn) >= 0);
6103       return -1;
6104     }
6105 
6106   if (dfa_lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
6107       || DEBUG_INSN_P (ready_element (ready, 0)))
6108     {
6109       if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
6110 	*insn_ptr = ready_remove_first_dispatch (ready);
6111       else
6112 	*insn_ptr = ready_remove_first (ready);
6113 
6114       return 0;
6115     }
6116   else
6117     {
6118       /* Try to choose the best insn.  */
6119       int index = 0, i;
6120       rtx_insn *insn;
6121 
6122       insn = ready_element (ready, 0);
6123       if (INSN_CODE (insn) < 0)
6124 	{
6125 	  *insn_ptr = ready_remove_first (ready);
6126 	  return 0;
6127 	}
6128 
6129       /* Filter the search space.  */
6130       for (i = 0; i < ready->n_ready; i++)
6131 	{
6132 	  ready_try[i] = 0;
6133 
6134 	  insn = ready_element (ready, i);
6135 
6136 	  /* If this insn is recognizable we should have already
6137 	     recognized it earlier.
6138 	     ??? Not very clear where this is supposed to be done.
6139 	     See dep_cost_1.  */
6140 	  gcc_checking_assert (INSN_CODE (insn) >= 0
6141 			       || recog_memoized (insn) < 0);
6142 	  if (INSN_CODE (insn) < 0)
6143 	    {
6144 	      /* Non-recognized insns at position 0 are handled above.  */
6145 	      gcc_assert (i > 0);
6146 	      ready_try[i] = 1;
6147 	      continue;
6148 	    }
6149 
6150 	  if (targetm.sched.first_cycle_multipass_dfa_lookahead_guard)
6151 	    {
6152 	      ready_try[i]
6153 		= (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
6154 		    (insn, i));
6155 
6156 	      if (ready_try[i] < 0)
6157 		/* Queue instruction for several cycles.
6158 		   We need to restart choose_ready as we have changed
6159 		   the ready list.  */
6160 		{
6161 		  change_queue_index (insn, -ready_try[i]);
6162 		  return 1;
6163 		}
6164 
6165 	      /* Make sure that we didn't end up with 0'th insn filtered out.
6166 		 Don't be tempted to make life easier for backends and just
6167 		 requeue 0'th insn if (ready_try[0] == 0) and restart
6168 		 choose_ready.  Backends should be very considerate about
6169 		 requeueing instructions -- especially the highest priority
6170 		 one at position 0.  */
6171 	      gcc_assert (ready_try[i] == 0 || i > 0);
6172 	      if (ready_try[i])
6173 		continue;
6174 	    }
6175 
6176 	  gcc_assert (ready_try[i] == 0);
6177 	  /* INSN made it through the scrutiny of filters!  */
6178 	}
6179 
6180       if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
6181 	{
6182 	  *insn_ptr = ready_remove_first (ready);
6183 	  if (sched_verbose >= 4)
6184 	    fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
6185                      (*current_sched_info->print_insn) (*insn_ptr, 0));
6186 	  return 0;
6187 	}
6188       else
6189 	{
6190 	  if (sched_verbose >= 4)
6191 	    fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
6192 		     (*current_sched_info->print_insn)
6193 		     (ready_element (ready, index), 0));
6194 
6195 	  *insn_ptr = ready_remove (ready, index);
6196 	  return 0;
6197 	}
6198     }
6199 }
6200 
6201 /* This function is called when we have successfully scheduled a
6202    block.  It uses the schedule stored in the scheduled_insns vector
6203    to rearrange the RTL.  PREV_HEAD is used as the anchor to which we
6204    append the scheduled insns; TAIL is the insn after the scheduled
6205    block.  TARGET_BB is the argument passed to schedule_block.  */
6206 
6207 static void
6208 commit_schedule (rtx_insn *prev_head, rtx_insn *tail, basic_block *target_bb)
6209 {
6210   unsigned int i;
6211   rtx_insn *insn;
6212 
6213   last_scheduled_insn = prev_head;
6214   for (i = 0;
6215        scheduled_insns.iterate (i, &insn);
6216        i++)
6217     {
6218       if (control_flow_insn_p (last_scheduled_insn)
6219 	  || current_sched_info->advance_target_bb (*target_bb, insn))
6220 	{
6221 	  *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);
6222 
6223 	  if (sched_verbose)
6224 	    {
6225 	      rtx_insn *x;
6226 
6227 	      x = next_real_insn (last_scheduled_insn);
6228 	      gcc_assert (x);
6229 	      dump_new_block_header (1, *target_bb, x, tail);
6230 	    }
6231 
6232 	  last_scheduled_insn = bb_note (*target_bb);
6233 	}
6234 
6235       if (current_sched_info->begin_move_insn)
6236 	(*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
6237       move_insn (insn, last_scheduled_insn,
6238 		 current_sched_info->next_tail);
6239       if (!DEBUG_INSN_P (insn))
6240 	reemit_notes (insn);
6241       last_scheduled_insn = insn;
6242     }
6243 
6244   scheduled_insns.truncate (0);
6245 }
6246 
6247 /* Examine all insns on the ready list and queue those which can't be
6248    issued in this cycle.  TEMP_STATE is temporary scheduler state we
6249    can use as scratch space.  If FIRST_CYCLE_INSN_P is true, no insns
6250    have been issued for the current cycle, which means it is valid to
6251    issue an asm statement.
6252 
6253    If SHADOWS_ONLY_P is true, we eliminate all real insns and only
6254    leave those for which SHADOW_P is true.  If MODULO_EPILOGUE is true,
6255    we only leave insns which have an INSN_EXACT_TICK.  */
6256 
6257 static void
6258 prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
6259 		  bool shadows_only_p, bool modulo_epilogue_p)
6260 {
6261   int i, pass;
6262   bool sched_group_found = false;
6263   int min_cost_group = 0;
6264 
6265   if (sched_fusion)
6266     return;
6267 
6268   for (i = 0; i < ready.n_ready; i++)
6269     {
6270       rtx_insn *insn = ready_element (&ready, i);
6271       if (SCHED_GROUP_P (insn))
6272 	{
6273 	  sched_group_found = true;
6274 	  break;
6275 	}
6276     }
6277 
6278   /* Make two passes if there's a SCHED_GROUP_P insn; make sure to handle
6279      such an insn first and note its cost.  If at least one SCHED_GROUP_P insn
6280      gets queued, then all other insns get queued for one cycle later.  */
6281   for (pass = sched_group_found ? 0 : 1; pass < 2; )
6282     {
6283       int n = ready.n_ready;
6284       for (i = 0; i < n; i++)
6285 	{
6286 	  rtx_insn *insn = ready_element (&ready, i);
6287 	  int cost = 0;
6288 	  const char *reason = "resource conflict";
6289 
6290 	  if (DEBUG_INSN_P (insn))
6291 	    continue;
6292 
6293 	  if (sched_group_found && !SCHED_GROUP_P (insn)
6294 	      && ((pass == 0) || (min_cost_group >= 1)))
6295 	    {
6296 	      if (pass == 0)
6297 		continue;
6298 	      cost = min_cost_group;
6299 	      reason = "not in sched group";
6300 	    }
6301 	  else if (modulo_epilogue_p
6302 		   && INSN_EXACT_TICK (insn) == INVALID_TICK)
6303 	    {
6304 	      cost = max_insn_queue_index;
6305 	      reason = "not an epilogue insn";
6306 	    }
6307 	  else if (shadows_only_p && !SHADOW_P (insn))
6308 	    {
6309 	      cost = 1;
6310 	      reason = "not a shadow";
6311 	    }
6312 	  else if (recog_memoized (insn) < 0)
6313 	    {
6314 	      if (!first_cycle_insn_p
6315 		  && (GET_CODE (PATTERN (insn)) == ASM_INPUT
6316 		      || asm_noperands (PATTERN (insn)) >= 0))
6317 		cost = 1;
6318 	      reason = "asm";
6319 	    }
6320 	  else if (sched_pressure != SCHED_PRESSURE_NONE)
6321 	    {
6322 	      if (sched_pressure == SCHED_PRESSURE_MODEL
6323 		  && INSN_TICK (insn) <= clock_var)
6324 		{
6325 		  memcpy (temp_state, curr_state, dfa_state_size);
6326 		  if (state_transition (temp_state, insn) >= 0)
6327 		    INSN_TICK (insn) = clock_var + 1;
6328 		}
6329 	      cost = 0;
6330 	    }
6331 	  else
6332 	    {
6333 	      int delay_cost = 0;
6334 
6335 	      if (delay_htab)
6336 		{
6337 		  struct delay_pair *delay_entry;
6338 		  delay_entry
6339 		    = delay_htab->find_with_hash (insn,
6340 						  htab_hash_pointer (insn));
6341 		  while (delay_entry && delay_cost == 0)
6342 		    {
6343 		      delay_cost = estimate_shadow_tick (delay_entry);
6344 		      if (delay_cost > max_insn_queue_index)
6345 			delay_cost = max_insn_queue_index;
6346 		      delay_entry = delay_entry->next_same_i1;
6347 		    }
6348 		}
6349 
6350 	      memcpy (temp_state, curr_state, dfa_state_size);
6351 	      cost = state_transition (temp_state, insn);
6352 	      if (cost < 0)
6353 		cost = 0;
6354 	      else if (cost == 0)
6355 		cost = 1;
6356 	      if (cost < delay_cost)
6357 		{
6358 		  cost = delay_cost;
6359 		  reason = "shadow tick";
6360 		}
6361 	    }
6362 	  if (cost >= 1)
6363 	    {
6364 	      if (SCHED_GROUP_P (insn) && cost > min_cost_group)
6365 		min_cost_group = cost;
6366 	      ready_remove (&ready, i);
6367 	      /* Normally we'd want to queue INSN for COST cycles.  However,
6368 		 if SCHED_GROUP_P is set, then we must ensure that nothing
6369 		 else comes between INSN and its predecessor.  If there is
6370 		 some other insn ready to fire on the next cycle, then that
6371 		 invariant would be broken.
6372 
6373 		 So when SCHED_GROUP_P is set, just queue this insn for a
6374 		 single cycle.  */
6375 	      queue_insn (insn, SCHED_GROUP_P (insn) ? 1 : cost, reason);
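	      /* Removing the insn shifted the rest of the ready list down, so
		 restart the scan from the top unless this was the last
		 element.  */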
6376 	      if (i + 1 < n)
6377 		break;
6378 	    }
6379 	}
6380       if (i == n)
6381 	pass++;
6382     }
6383 }
6384 
6385 /* Called when we detect that the schedule is impossible.  We examine the
6386    backtrack queue to find the earliest insn that caused this condition.  */
6387 
6388 static struct haifa_saved_data *
6389 verify_shadows (void)
6390 {
6391   struct haifa_saved_data *save, *earliest_fail = NULL;
6392   for (save = backtrack_queue; save; save = save->next)
6393     {
6394       int t;
6395       struct delay_pair *pair = save->delay_pair;
6396       rtx_insn *i1 = pair->i1;
6397 
6398       for (; pair; pair = pair->next_same_i1)
6399 	{
6400 	  rtx_insn *i2 = pair->i2;
6401 
6402 	  if (QUEUE_INDEX (i2) == QUEUE_SCHEDULED)
6403 	    continue;
6404 
6405 	  t = INSN_TICK (i1) + pair_delay (pair);
6406 	  if (t < clock_var)
6407 	    {
6408 	      if (sched_verbose >= 2)
6409 		fprintf (sched_dump,
6410 			 ";;\t\tfailed delay requirements for %d/%d (%d->%d)"
6411 			 ", not ready\n",
6412 			 INSN_UID (pair->i1), INSN_UID (pair->i2),
6413 			 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
6414 	      earliest_fail = save;
6415 	      break;
6416 	    }
6417 	  if (QUEUE_INDEX (i2) >= 0)
6418 	    {
6419 	      int queued_for = INSN_TICK (i2);
6420 
6421 	      if (t < queued_for)
6422 		{
6423 		  if (sched_verbose >= 2)
6424 		    fprintf (sched_dump,
6425 			     ";;\t\tfailed delay requirements for %d/%d"
6426 			     " (%d->%d), queued too late\n",
6427 			     INSN_UID (pair->i1), INSN_UID (pair->i2),
6428 			     INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
6429 		  earliest_fail = save;
6430 		  break;
6431 		}
6432 	    }
6433 	}
6434     }
6435 
6436   return earliest_fail;
6437 }
6438 
6439 /* Print instructions together with useful scheduling information between
6440    HEAD and TAIL (inclusive).  */
6441 static void
6442 dump_insn_stream (rtx_insn *head, rtx_insn *tail)
6443 {
6444   fprintf (sched_dump, ";;\t| insn | prio |\n");
6445 
6446   rtx_insn *next_tail = NEXT_INSN (tail);
6447   for (rtx_insn *insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6448     {
6449       int priority = NOTE_P (insn) ? 0 : INSN_PRIORITY (insn);
6450       const char *pattern = (NOTE_P (insn)
6451 			     ? "note"
6452 			     : str_pattern_slim (PATTERN (insn)));
6453 
6454       fprintf (sched_dump, ";;\t| %4d | %4d | %-30s ",
6455 	       INSN_UID (insn), priority, pattern);
6456 
6457       if (sched_verbose >= 4)
6458 	{
6459 	  if (NOTE_P (insn) || LABEL_P (insn) || recog_memoized (insn) < 0)
6460 	    fprintf (sched_dump, "nothing");
6461 	  else
6462 	    print_reservation (sched_dump, insn);
6463 	}
6464       fprintf (sched_dump, "\n");
6465     }
6466 }
6467 
6468 /* Use forward list scheduling to rearrange insns of block pointed to by
6469    TARGET_BB, possibly bringing insns from subsequent blocks in the same
6470    region.  */
6471 
6472 bool
6473 schedule_block (basic_block *target_bb, state_t init_state)
6474 {
6475   int i;
6476   bool success = modulo_ii == 0;
6477   struct sched_block_state ls;
6478   state_t temp_state = NULL;  /* It is used for multipass scheduling.  */
6479   int sort_p, advance, start_clock_var;
6480 
6481   /* Head/tail info for this block.  */
6482   rtx_insn *prev_head = current_sched_info->prev_head;
6483   rtx_insn *next_tail = current_sched_info->next_tail;
6484   rtx_insn *head = NEXT_INSN (prev_head);
6485   rtx_insn *tail = PREV_INSN (next_tail);
6486 
6487   if ((current_sched_info->flags & DONT_BREAK_DEPENDENCIES) == 0
6488       && sched_pressure != SCHED_PRESSURE_MODEL && !sched_fusion)
6489     find_modifiable_mems (head, tail);
6490 
6491   /* We used to have code to avoid getting parameters moved from hard
6492      argument registers into pseudos.
6493 
6494      However, it was removed when it proved to be of marginal benefit
6495      and caused problems because schedule_block and compute_forward_dependences
6496      had different notions of what the "head" insn was.  */
6497 
6498   gcc_assert (head != tail || INSN_P (head));
6499 
6500   haifa_recovery_bb_recently_added_p = false;
6501 
6502   backtrack_queue = NULL;
6503 
6504   /* Debug info.  */
6505   if (sched_verbose)
6506     {
6507       dump_new_block_header (0, *target_bb, head, tail);
6508 
6509       if (sched_verbose >= 2)
6510 	{
6511 	  dump_insn_stream (head, tail);
6512 	  memset (&rank_for_schedule_stats, 0,
6513 		  sizeof (rank_for_schedule_stats));
6514 	}
6515     }
6516 
6517   if (init_state == NULL)
6518     state_reset (curr_state);
6519   else
6520     memcpy (curr_state, init_state, dfa_state_size);
6521 
6522   /* Clear the ready list.  */
6523   ready.first = ready.veclen - 1;
6524   ready.n_ready = 0;
6525   ready.n_debug = 0;
6526 
6527   /* It is used for first cycle multipass scheduling.  */
6528   temp_state = alloca (dfa_state_size);
6529 
6530   if (targetm.sched.init)
6531     targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
6532 
6533   /* We start inserting insns after PREV_HEAD.  */
6534   last_scheduled_insn = prev_head;
6535   last_nondebug_scheduled_insn = NULL;
6536   nonscheduled_insns_begin = NULL;
6537 
6538   gcc_assert ((NOTE_P (last_scheduled_insn)
6539 	       || DEBUG_INSN_P (last_scheduled_insn))
6540 	      && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
6541 
6542   /* Initialize INSN_QUEUE.  Q_SIZE is the total number of insns in the
6543      queue.  */
6544   q_ptr = 0;
6545   q_size = 0;
6546 
6547   insn_queue = XALLOCAVEC (rtx_insn_list *, max_insn_queue_index + 1);
6548   memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
6549 
6550   /* Start just before the beginning of time.  */
6551   clock_var = -1;
6552 
6553   /* We need the queue, the ready lists and clock_var to be initialized
6554      in try_ready () (which is called through init_ready_list ()).  */
6555   (*current_sched_info->init_ready_list) ();
6556 
6557   if (sched_pressure)
6558     sched_pressure_start_bb (*target_bb);
6559 
6560   /* The algorithm is O(n^2) in the number of ready insns at any given
6561      time in the worst case.  Before reload we are more likely to have
6562      big lists so truncate them to a reasonable size.  */
6563   if (!reload_completed
6564       && ready.n_ready - ready.n_debug > param_max_sched_ready_insns)
6565     {
6566       ready_sort_debug (&ready);
6567       ready_sort_real (&ready);
6568 
6569       /* Find first free-standing insn past param_max_sched_ready_insns.
6570          If there are debug insns, we know they're first.  */
6571       for (i = param_max_sched_ready_insns + ready.n_debug; i < ready.n_ready;
6572 	   i++)
6573 	if (!SCHED_GROUP_P (ready_element (&ready, i)))
6574 	  break;
6575 
6576       if (sched_verbose >= 2)
6577 	{
6578 	  fprintf (sched_dump,
6579 		   ";;\t\tReady list on entry: %d insns:  ", ready.n_ready);
6580 	  debug_ready_list (&ready);
6581 	  fprintf (sched_dump,
6582 		   ";;\t\t before reload => truncated to %d insns\n", i);
6583 	}
6584 
6585       /* Delay all insns past it for 1 cycle.  If the debug counter is
6586 	 activated, make an exception for the insn right after
6587 	 nonscheduled_insns_begin.  */
6588       {
6589 	rtx_insn *skip_insn;
6590 
6591 	if (dbg_cnt (sched_insn) == false)
6592 	  skip_insn = first_nonscheduled_insn ();
6593 	else
6594 	  skip_insn = NULL;
6595 
6596 	while (i < ready.n_ready)
6597 	  {
6598 	    rtx_insn *insn;
6599 
6600 	    insn = ready_remove (&ready, i);
6601 
6602 	    if (insn != skip_insn)
6603 	      queue_insn (insn, 1, "list truncated");
6604 	  }
6605 	if (skip_insn)
6606 	  ready_add (&ready, skip_insn, true);
6607       }
6608     }
6609 
6610   /* Now we can restore basic block notes and maintain precise cfg.  */
6611   restore_bb_notes (*target_bb);
6612 
6613   last_clock_var = -1;
6614 
6615   advance = 0;
6616 
6617   gcc_assert (scheduled_insns.length () == 0);
6618   sort_p = TRUE;
6619   must_backtrack = false;
6620   modulo_insns_scheduled = 0;
6621 
6622   ls.modulo_epilogue = false;
6623   ls.first_cycle_insn_p = true;
6624 
6625   /* Loop until all the insns in BB are scheduled.  */
6626   while ((*current_sched_info->schedule_more_p) ())
6627     {
6628       perform_replacements_new_cycle ();
6629       do
6630 	{
6631 	  start_clock_var = clock_var;
6632 
6633 	  clock_var++;
6634 
6635 	  advance_one_cycle ();
6636 
6637 	  /* Add to the ready list all pending insns that can be issued now.
6638 	     If there are no ready insns, increment clock until one
6639 	     is ready and add all pending insns at that point to the ready
6640 	     list.  */
6641 	  queue_to_ready (&ready);
6642 
6643 	  gcc_assert (ready.n_ready);
6644 
6645 	  if (sched_verbose >= 2)
6646 	    {
6647 	      fprintf (sched_dump, ";;\t\tReady list after queue_to_ready:");
6648 	      debug_ready_list (&ready);
6649 	    }
6650 	  advance -= clock_var - start_clock_var;
6651 	}
6652       while (advance > 0);
6653 
6654       if (ls.modulo_epilogue)
6655 	{
6656 	  int stage = clock_var / modulo_ii;
6657 	  if (stage > modulo_last_stage * 2 + 2)
6658 	    {
6659 	      if (sched_verbose >= 2)
6660 		fprintf (sched_dump,
6661 			 ";;\t\tmodulo scheduling succeeded at II %d\n",
6662 			 modulo_ii);
6663 	      success = true;
6664 	      goto end_schedule;
6665 	    }
6666 	}
6667       else if (modulo_ii > 0)
6668 	{
6669 	  int stage = clock_var / modulo_ii;
6670 	  if (stage > modulo_max_stages)
6671 	    {
6672 	      if (sched_verbose >= 2)
6673 		fprintf (sched_dump,
6674 			 ";;\t\tfailing schedule due to excessive stages\n");
6675 	      goto end_schedule;
6676 	    }
6677 	  if (modulo_n_insns == modulo_insns_scheduled
6678 	      && stage > modulo_last_stage)
6679 	    {
6680 	      if (sched_verbose >= 2)
6681 		fprintf (sched_dump,
6682 			 ";;\t\tfound kernel after %d stages, II %d\n",
6683 			 stage, modulo_ii);
6684 	      ls.modulo_epilogue = true;
6685 	    }
6686 	}
6687 
6688       prune_ready_list (temp_state, true, false, ls.modulo_epilogue);
6689       if (ready.n_ready == 0)
6690 	continue;
6691       if (must_backtrack)
6692 	goto do_backtrack;
6693 
6694       ls.shadows_only_p = false;
6695       cycle_issued_insns = 0;
6696       ls.can_issue_more = issue_rate;
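      /* Issue insns for the current cycle until the target's issue capacity
	 is exhausted or nothing more is ready.  */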
6697       for (;;)
6698 	{
6699 	  rtx_insn *insn;
6700 	  int cost;
6701 	  bool asm_p;
6702 
6703 	  if (sort_p && ready.n_ready > 0)
6704 	    {
6705 	      /* Sort the ready list based on priority.  This must be
6706 		 done every iteration through the loop, as schedule_insn
6707 		 may have readied additional insns that will not be
6708 		 sorted correctly.  */
6709 	      ready_sort (&ready);
6710 
6711 	      if (sched_verbose >= 2)
6712 		{
6713 		  fprintf (sched_dump,
6714 			   ";;\t\tReady list after ready_sort:    ");
6715 		  debug_ready_list (&ready);
6716 		}
6717 	    }
6718 
6719 	  /* We don't want md sched reorder to even see debug insns, so put
6720 	     them out right away.  */
6721 	  if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
6722 	      && (*current_sched_info->schedule_more_p) ())
6723 	    {
6724 	      while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
6725 		{
6726 		  rtx_insn *insn = ready_remove_first (&ready);
6727 		  gcc_assert (DEBUG_INSN_P (insn));
6728 		  (*current_sched_info->begin_schedule_ready) (insn);
6729 		  scheduled_insns.safe_push (insn);
6730 		  last_scheduled_insn = insn;
6731 		  advance = schedule_insn (insn);
6732 		  gcc_assert (advance == 0);
6733 		  if (ready.n_ready > 0)
6734 		    ready_sort (&ready);
6735 		}
6736 	    }
6737 
6738 	  if (ls.first_cycle_insn_p && !ready.n_ready)
6739 	    break;
6740 
6741 	resume_after_backtrack:
6742 	  /* Allow the target to reorder the list, typically for
6743 	     better instruction bundling.  */
6744 	  if (sort_p
6745 	      && (ready.n_ready == 0
6746 		  || !SCHED_GROUP_P (ready_element (&ready, 0))))
6747 	    {
6748 	      if (ls.first_cycle_insn_p && targetm.sched.reorder)
6749 		ls.can_issue_more
6750 		  = targetm.sched.reorder (sched_dump, sched_verbose,
6751 					   ready_lastpos (&ready),
6752 					   &ready.n_ready, clock_var);
6753 	      else if (!ls.first_cycle_insn_p && targetm.sched.reorder2)
6754 		ls.can_issue_more
6755 		  = targetm.sched.reorder2 (sched_dump, sched_verbose,
6756 					    ready.n_ready
6757 					    ? ready_lastpos (&ready) : NULL,
6758 					    &ready.n_ready, clock_var);
6759 	    }
6760 
6761 	restart_choose_ready:
6762 	  if (sched_verbose >= 2)
6763 	    {
6764 	      fprintf (sched_dump, ";;\tReady list (t = %3d):  ",
6765 		       clock_var);
6766 	      debug_ready_list (&ready);
6767 	      if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
6768 		print_curr_reg_pressure ();
6769 	    }
6770 
6771 	  if (ready.n_ready == 0
6772 	      && ls.can_issue_more
6773 	      && reload_completed)
6774 	    {
6775 	      /* Allow scheduling insns directly from the queue in case
6776 		 there's nothing better to do (ready list is empty) but
6777 		 there are still vacant dispatch slots in the current cycle.  */
6778 	      if (sched_verbose >= 6)
6779 		fprintf (sched_dump,";;\t\tSecond chance\n");
6780 	      memcpy (temp_state, curr_state, dfa_state_size);
6781 	      if (early_queue_to_ready (temp_state, &ready))
6782 		ready_sort (&ready);
6783 	    }
6784 
6785 	  if (ready.n_ready == 0
6786 	      || !ls.can_issue_more
6787 	      || state_dead_lock_p (curr_state)
6788 	      || !(*current_sched_info->schedule_more_p) ())
6789 	    break;
6790 
6791 	  /* Select and remove the insn from the ready list.  */
6792 	  if (sort_p)
6793 	    {
6794 	      int res;
6795 
6796 	      insn = NULL;
6797 	      res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);
6798 
6799 	      if (res < 0)
6800 		/* Finish cycle.  */
6801 		break;
6802 	      if (res > 0)
6803 		goto restart_choose_ready;
6804 
6805 	      gcc_assert (insn != NULL_RTX);
6806 	    }
6807 	  else
6808 	    insn = ready_remove_first (&ready);
6809 
6810 	  if (sched_pressure != SCHED_PRESSURE_NONE
6811 	      && INSN_TICK (insn) > clock_var)
6812 	    {
6813 	      ready_add (&ready, insn, true);
6814 	      advance = 1;
6815 	      break;
6816 	    }
6817 
6818 	  if (targetm.sched.dfa_new_cycle
6819 	      && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
6820 					      insn, last_clock_var,
6821 					      clock_var, &sort_p))
6822 	    /* SORT_P is used by the target to override sorting
6823 	       of the ready list.  This is needed when the target
6824 	       has modified its internal structures expecting that
6825 	       the insn will be issued next.  As we need the insn
6826 	       to have the highest priority (so it will be returned by
6827 	       the ready_remove_first call above), we invoke
6828 	       ready_add (&ready, insn, true).
6829 	       But, still, there is one issue: INSN can be later
6830 	       discarded by scheduler's front end through
6831 	       current_sched_info->can_schedule_ready_p, hence, won't
6832 	       be issued next.  */
6833 	    {
6834 	      ready_add (&ready, insn, true);
6835               break;
6836 	    }
6837 
6838 	  sort_p = TRUE;
6839 
6840 	  if (current_sched_info->can_schedule_ready_p
6841 	      && ! (*current_sched_info->can_schedule_ready_p) (insn))
6842 	    /* We normally get here only if we don't want to move
6843 	       insn from the split block.  */
6844 	    {
6845 	      TODO_SPEC (insn) = DEP_POSTPONED;
6846 	      goto restart_choose_ready;
6847 	    }
6848 
6849 	  if (delay_htab)
6850 	    {
6851 	      /* If this insn is the first part of a delay-slot pair, record a
6852 		 backtrack point.  */
6853 	      struct delay_pair *delay_entry;
6854 	      delay_entry
6855 		= delay_htab->find_with_hash (insn, htab_hash_pointer (insn));
6856 	      if (delay_entry)
6857 		{
6858 		  save_backtrack_point (delay_entry, ls);
6859 		  if (sched_verbose >= 2)
6860 		    fprintf (sched_dump, ";;\t\tsaving backtrack point\n");
6861 		}
6862 	    }
6863 
6864 	  /* DECISION is made.  */
6865 
6866 	  if (modulo_ii > 0 && INSN_UID (insn) < modulo_iter0_max_uid)
6867 	    {
6868 	      modulo_insns_scheduled++;
6869 	      modulo_last_stage = clock_var / modulo_ii;
6870 	    }
6871           if (TODO_SPEC (insn) & SPECULATIVE)
6872             generate_recovery_code (insn);
6873 
6874 	  if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
6875 	    targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
6876 
6877 	  /* Update counters, etc in the scheduler's front end.  */
6878 	  (*current_sched_info->begin_schedule_ready) (insn);
6879 	  scheduled_insns.safe_push (insn);
6880 	  gcc_assert (NONDEBUG_INSN_P (insn));
6881 	  last_nondebug_scheduled_insn = last_scheduled_insn = insn;
6882 
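	  /* Advance the DFA state for INSN.  If the state actually changed,
	     the insn occupied an issue slot on this cycle.  */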
6883 	  if (recog_memoized (insn) >= 0)
6884 	    {
6885 	      memcpy (temp_state, curr_state, dfa_state_size);
6886 	      cost = state_transition (curr_state, insn);
6887 	      if (sched_pressure != SCHED_PRESSURE_WEIGHTED && !sched_fusion)
6888 		gcc_assert (cost < 0);
6889 	      if (memcmp (temp_state, curr_state, dfa_state_size) != 0)
6890 		cycle_issued_insns++;
6891 	      asm_p = false;
6892 	    }
6893 	  else
6894 	    asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
6895 		     || asm_noperands (PATTERN (insn)) >= 0);
6896 
6897 	  if (targetm.sched.variable_issue)
6898 	    ls.can_issue_more =
6899 	      targetm.sched.variable_issue (sched_dump, sched_verbose,
6900 					    insn, ls.can_issue_more);
6901 	  /* A naked CLOBBER or USE generates no instruction, so do
6902 	     not count them against the issue rate.  */
6903 	  else if (GET_CODE (PATTERN (insn)) != USE
6904 		   && GET_CODE (PATTERN (insn)) != CLOBBER)
6905 	    ls.can_issue_more--;
6906 	  advance = schedule_insn (insn);
6907 
6908 	  if (SHADOW_P (insn))
6909 	    ls.shadows_only_p = true;
6910 
6911 	  /* After issuing an asm insn we should start a new cycle.  */
6912 	  if (advance == 0 && asm_p)
6913 	    advance = 1;
6914 
6915 	  if (must_backtrack)
6916 	    break;
6917 
6918 	  if (advance != 0)
6919 	    break;
6920 
6921 	  ls.first_cycle_insn_p = false;
6922 	  if (ready.n_ready > 0)
6923 	    prune_ready_list (temp_state, false, ls.shadows_only_p,
6924 			      ls.modulo_epilogue);
6925 	}
6926 
6927     do_backtrack:
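      /* If an insn with an exact tick requirement is still sitting on the
	 ready list at that tick, its constraint has been missed and we must
	 backtrack.  */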
6928       if (!must_backtrack)
6929 	for (i = 0; i < ready.n_ready; i++)
6930 	  {
6931 	    rtx_insn *insn = ready_element (&ready, i);
6932 	    if (INSN_EXACT_TICK (insn) == clock_var)
6933 	      {
6934 		must_backtrack = true;
6935 		clock_var++;
6936 		break;
6937 	      }
6938 	  }
6939       if (must_backtrack && modulo_ii > 0)
6940 	{
6941 	  if (modulo_backtracks_left == 0)
6942 	    goto end_schedule;
6943 	  modulo_backtracks_left--;
6944 	}
6945       while (must_backtrack)
6946 	{
6947 	  struct haifa_saved_data *failed;
6948 	  rtx_insn *failed_insn;
6949 
6950 	  must_backtrack = false;
6951 	  failed = verify_shadows ();
6952 	  gcc_assert (failed);
6953 
6954 	  failed_insn = failed->delay_pair->i1;
6955 	  /* Clear these queues.  */
6956 	  perform_replacements_new_cycle ();
6957 	  toggle_cancelled_flags (false);
6958 	  unschedule_insns_until (failed_insn);
6959 	  while (failed != backtrack_queue)
6960 	    free_topmost_backtrack_point (true);
6961 	  restore_last_backtrack_point (&ls);
6962 	  if (sched_verbose >= 2)
6963 	    fprintf (sched_dump, ";;\t\trewind to cycle %d\n", clock_var);
6964 	  /* Delay by at least a cycle.  This could cause additional
6965 	     backtracking.  */
6966 	  queue_insn (failed_insn, 1, "backtracked");
6967 	  advance = 0;
6968 	  if (must_backtrack)
6969 	    continue;
6970 	  if (ready.n_ready > 0)
6971 	    goto resume_after_backtrack;
6972 	  else
6973 	    {
6974 	      if (clock_var == 0 && ls.first_cycle_insn_p)
6975 		goto end_schedule;
6976 	      advance = 1;
6977 	      break;
6978 	    }
6979 	}
6980       ls.first_cycle_insn_p = true;
6981     }
6982   if (ls.modulo_epilogue)
6983     success = true;
6984  end_schedule:
6985   if (!ls.first_cycle_insn_p || advance)
6986     advance_one_cycle ();
6987   perform_replacements_new_cycle ();
6988   if (modulo_ii > 0)
6989     {
6990       /* Once again, debug insn suckiness: they can be on the ready list
6991 	 even if they have unresolved dependencies.  To make our view
6992 	 of the world consistent, remove such "ready" insns.  */
6993     restart_debug_insn_loop:
6994       for (i = ready.n_ready - 1; i >= 0; i--)
6995 	{
6996 	  rtx_insn *x;
6997 
6998 	  x = ready_element (&ready, i);
6999 	  if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (x)) != NULL
7000 	      || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (x)) != NULL)
7001 	    {
7002 	      ready_remove (&ready, i);
7003 	      goto restart_debug_insn_loop;
7004 	    }
7005 	}
7006       for (i = ready.n_ready - 1; i >= 0; i--)
7007 	{
7008 	  rtx_insn *x;
7009 
7010 	  x = ready_element (&ready, i);
7011 	  resolve_dependencies (x);
7012 	}
7013       for (i = 0; i <= max_insn_queue_index; i++)
7014 	{
7015 	  rtx_insn_list *link;
7016 	  while ((link = insn_queue[i]) != NULL)
7017 	    {
7018 	      rtx_insn *x = link->insn ();
7019 	      insn_queue[i] = link->next ();
7020 	      QUEUE_INDEX (x) = QUEUE_NOWHERE;
7021 	      free_INSN_LIST_node (link);
7022 	      resolve_dependencies (x);
7023 	    }
7024 	}
7025     }
7026 
7027   if (!success)
7028     undo_all_replacements ();
7029 
7030   /* Debug info.  */
7031   if (sched_verbose)
7032     {
7033       fprintf (sched_dump, ";;\tReady list (final):  ");
7034       debug_ready_list (&ready);
7035     }
7036 
7037   if (modulo_ii == 0 && current_sched_info->queue_must_finish_empty)
7038     /* Sanity check -- queue must be empty now.  Meaningless if region has
7039        multiple bbs.  */
7040     gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
7041   else if (modulo_ii == 0)
7042     {
7043       /* We must maintain QUEUE_INDEX between blocks in region.  */
7044       for (i = ready.n_ready - 1; i >= 0; i--)
7045 	{
7046 	  rtx_insn *x;
7047 
7048 	  x = ready_element (&ready, i);
7049 	  QUEUE_INDEX (x) = QUEUE_NOWHERE;
7050 	  TODO_SPEC (x) = HARD_DEP;
7051 	}
7052 
7053       if (q_size)
7054 	for (i = 0; i <= max_insn_queue_index; i++)
7055 	  {
7056 	    rtx_insn_list *link;
7057 	    for (link = insn_queue[i]; link; link = link->next ())
7058 	      {
7059 		rtx_insn *x;
7060 
7061 		x = link->insn ();
7062 		QUEUE_INDEX (x) = QUEUE_NOWHERE;
7063 		TODO_SPEC (x) = HARD_DEP;
7064 	      }
7065 	    free_INSN_LIST_list (&insn_queue[i]);
7066 	  }
7067     }
7068 
7069   if (sched_pressure == SCHED_PRESSURE_MODEL)
7070     model_end_schedule ();
7071 
7072   if (success)
7073     {
7074       commit_schedule (prev_head, tail, target_bb);
7075       if (sched_verbose)
7076 	fprintf (sched_dump, ";;   total time = %d\n", clock_var);
7077     }
7078   else
7079     last_scheduled_insn = tail;
7080 
7081   scheduled_insns.truncate (0);
7082 
7083   if (!current_sched_info->queue_must_finish_empty
7084       || haifa_recovery_bb_recently_added_p)
7085     {
7086       /* INSN_TICK (minimum clock tick at which the insn becomes
7087          ready) may not be correct for insns in the subsequent
7088          blocks of the region.  We should use a correct value of
7089          `clock_var' or modify INSN_TICK.  It is better to keep
7090          the clock_var value equal to 0 at the start of a basic block.
7091          Therefore we modify INSN_TICK here.  */
7092       fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
7093     }
7094 
7095   if (targetm.sched.finish)
7096     {
7097       targetm.sched.finish (sched_dump, sched_verbose);
7098       /* The target might have added some instructions to the scheduled block
7099 	 in its md_finish () hook.  These new insns don't have any data
7100 	 initialized, so to identify them we extend h_i_d so that they'll
7101 	 get zero luids.  */
7102       sched_extend_luids ();
7103     }
7104 
7105   /* Update head/tail boundaries.  */
7106   head = NEXT_INSN (prev_head);
7107   tail = last_scheduled_insn;
7108 
7109   if (sched_verbose)
7110     {
7111       fprintf (sched_dump, ";;   new head = %d\n;;   new tail = %d\n",
7112 	       INSN_UID (head), INSN_UID (tail));
7113 
7114       if (sched_verbose >= 2)
7115 	{
7116 	  dump_insn_stream (head, tail);
7117 	  print_rank_for_schedule_stats (";; TOTAL ", &rank_for_schedule_stats,
7118 					 NULL);
7119 	}
7120 
7121       fprintf (sched_dump, "\n");
7122     }
7123 
7124   head = restore_other_notes (head, NULL);
7125 
7126   current_sched_info->head = head;
7127   current_sched_info->tail = tail;
7128 
7129   free_backtrack_queue ();
7130 
7131   return success;
7132 }
7133 
7134 /* Set_priorities: compute priority of each insn in the block.  */
7135 
7136 int
7137 set_priorities (rtx_insn *head, rtx_insn *tail)
7138 {
7139   rtx_insn *insn;
7140   int n_insn;
7141   int sched_max_insns_priority =
7142 	current_sched_info->sched_max_insns_priority;
7143   rtx_insn *prev_head;
7144 
7145   if (head == tail && ! INSN_P (head))
7146     gcc_unreachable ();
7147 
7148   n_insn = 0;
7149 
7150   prev_head = PREV_INSN (head);
7151   for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
7152     {
7153       if (!INSN_P (insn))
7154 	continue;
7155 
7156       n_insn++;
7157       (void) priority (insn);
7158 
7159       gcc_assert (INSN_PRIORITY_KNOWN (insn));
7160 
7161       sched_max_insns_priority = MAX (sched_max_insns_priority,
7162 				      INSN_PRIORITY (insn));
7163     }
7164 
7165   current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
7166 
7167   return n_insn;
7168 }
7169 
7170 /* Set sched_dump and sched_verbose for the desired debugging output. */
7171 void
7172 setup_sched_dump (void)
7173 {
7174   sched_verbose = sched_verbose_param;
7175   sched_dump = dump_file;
7176   if (!dump_file)
7177     sched_verbose = 0;
7178 }
7179 
7180 /* Allocate data for register pressure sensitive scheduling.  */
7181 static void
7182 alloc_global_sched_pressure_data (void)
7183 {
7184   if (sched_pressure != SCHED_PRESSURE_NONE)
7185     {
7186       int i, max_regno = max_reg_num ();
7187 
7188       if (sched_dump != NULL)
7189 	/* We need info about pseudos for rtl dumps about pseudo
7190 	   classes and costs.  */
7191 	regstat_init_n_sets_and_refs ();
7192       ira_set_pseudo_classes (true, sched_verbose ? sched_dump : NULL);
7193       sched_regno_pressure_class
7194 	= (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
7195       for (i = 0; i < max_regno; i++)
7196 	sched_regno_pressure_class[i]
7197 	  = (i < FIRST_PSEUDO_REGISTER
7198 	     ? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
7199 	     : ira_pressure_class_translate[reg_allocno_class (i)]);
7200       curr_reg_live = BITMAP_ALLOC (NULL);
7201       if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
7202 	{
7203 	  saved_reg_live = BITMAP_ALLOC (NULL);
7204 	  region_ref_regs = BITMAP_ALLOC (NULL);
7205 	}
7206       if (sched_pressure == SCHED_PRESSURE_MODEL)
7207 	tmp_bitmap = BITMAP_ALLOC (NULL);
7208 
7209       /* Calculate number of CALL_SAVED_REGS and FIXED_REGS in register classes
7210 	 that we calculate register pressure for.  */
7211       for (int c = 0; c < ira_pressure_classes_num; ++c)
7212 	{
7213 	  enum reg_class cl = ira_pressure_classes[c];
7214 
7215 	  call_saved_regs_num[cl] = 0;
7216 	  fixed_regs_num[cl] = 0;
7217 
7218 	  for (int i = 0; i < ira_class_hard_regs_num[cl]; ++i)
7219 	    {
7220 	      unsigned int regno = ira_class_hard_regs[cl][i];
7221 	      if (fixed_regs[regno])
7222 		++fixed_regs_num[cl];
7223 	      else if (!crtl->abi->clobbers_full_reg_p (regno))
7224 		++call_saved_regs_num[cl];
7225 	    }
7226 	}
7227     }
7228 }
7229 
7230 /*  Free data for register pressure sensitive scheduling.  Also called
7231     from schedule_region when stopping sched-pressure early.  */
7232 void
7233 free_global_sched_pressure_data (void)
7234 {
7235   if (sched_pressure != SCHED_PRESSURE_NONE)
7236     {
7237       if (regstat_n_sets_and_refs != NULL)
7238 	regstat_free_n_sets_and_refs ();
7239       if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
7240 	{
7241 	  BITMAP_FREE (region_ref_regs);
7242 	  BITMAP_FREE (saved_reg_live);
7243 	}
7244       if (sched_pressure == SCHED_PRESSURE_MODEL)
7245 	BITMAP_FREE (tmp_bitmap);
7246       BITMAP_FREE (curr_reg_live);
7247       free (sched_regno_pressure_class);
7248     }
7249 }
7250 
7251 /* Initialize some global state for the scheduler.  This function works
7252    with the common data shared between all the schedulers.  It is called
7253    from the scheduler specific initialization routine.  */
7254 
7255 void
7256 sched_init (void)
7257 {
7258   /* Disable speculative loads if cc0 is defined.  */
7259   if (HAVE_cc0)
7260     flag_schedule_speculative_load = 0;
7261 
7262   if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
7263     targetm.sched.dispatch_do (NULL, DISPATCH_INIT);
7264 
7265   if (live_range_shrinkage_p)
7266     sched_pressure = SCHED_PRESSURE_WEIGHTED;
7267   else if (flag_sched_pressure
7268 	   && !reload_completed
7269 	   && common_sched_info->sched_pass_id == SCHED_RGN_PASS)
7270     sched_pressure = ((enum sched_pressure_algorithm)
7271 		      param_sched_pressure_algorithm);
7272   else
7273     sched_pressure = SCHED_PRESSURE_NONE;
7274 
7275   if (sched_pressure != SCHED_PRESSURE_NONE)
7276     ira_setup_eliminable_regset ();
7277 
7278   /* Initialize SPEC_INFO.  */
7279   if (targetm.sched.set_sched_flags)
7280     {
7281       spec_info = &spec_info_var;
7282       targetm.sched.set_sched_flags (spec_info);
7283 
7284       if (spec_info->mask != 0)
7285         {
7286 	  spec_info->data_weakness_cutoff
7287 	    = (param_sched_spec_prob_cutoff * MAX_DEP_WEAK) / 100;
7288 	  spec_info->control_weakness_cutoff
7289 	    = (param_sched_spec_prob_cutoff * REG_BR_PROB_BASE) / 100;
7290         }
7291       else
7292 	/* So we won't read anything accidentally.  */
7293 	spec_info = NULL;
7294 
7295     }
7296   else
7297     /* So we won't read anything accidentally.  */
7298     spec_info = 0;
7299 
7300   /* Initialize issue_rate.  */
7301   if (targetm.sched.issue_rate)
7302     issue_rate = targetm.sched.issue_rate ();
7303   else
7304     issue_rate = 1;
7305 
7306   if (targetm.sched.first_cycle_multipass_dfa_lookahead
7307       /* Don't use max_issue with reg_pressure scheduling.  Multipass
7308 	 scheduling and reg_pressure scheduling undo each other's decisions.  */
7309       && sched_pressure == SCHED_PRESSURE_NONE)
7310     dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
7311   else
7312     dfa_lookahead = 0;
7313 
7314   /* Set to "0" so that we recalculate.  */
7315   max_lookahead_tries = 0;
7316 
7317   if (targetm.sched.init_dfa_pre_cycle_insn)
7318     targetm.sched.init_dfa_pre_cycle_insn ();
7319 
7320   if (targetm.sched.init_dfa_post_cycle_insn)
7321     targetm.sched.init_dfa_post_cycle_insn ();
7322 
7323   dfa_start ();
7324   dfa_state_size = state_size ();
7325 
7326   init_alias_analysis ();
7327 
7328   if (!sched_no_dce)
7329     df_set_flags (DF_LR_RUN_DCE);
7330   df_note_add_problem ();
7331 
7332   /* More problems needed for interloop dep calculation in SMS.  */
7333   if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
7334     {
7335       df_rd_add_problem ();
7336       df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
7337     }
7338 
7339   df_analyze ();
7340 
7341   /* Do not run DCE after reload, as this can kill nops inserted
7342      by bundling.  */
7343   if (reload_completed)
7344     df_clear_flags (DF_LR_RUN_DCE);
7345 
7346   regstat_compute_calls_crossed ();
7347 
7348   if (targetm.sched.init_global)
7349     targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
7350 
7351   alloc_global_sched_pressure_data ();
7352 
7353   curr_state = xmalloc (dfa_state_size);
7354 }
7355 
7356 static void haifa_init_only_bb (basic_block, basic_block);
7357 
7358 /* Initialize data structures specific to the Haifa scheduler.  */
7359 void
7360 haifa_sched_init (void)
7361 {
7362   setup_sched_dump ();
7363   sched_init ();
7364 
7365   scheduled_insns.create (0);
7366 
7367   if (spec_info != NULL)
7368     {
7369       sched_deps_info->use_deps_list = 1;
7370       sched_deps_info->generate_spec_deps = 1;
7371     }
7372 
7373   /* Initialize luids, dependency caches, target and h_i_d for the
7374      whole function.  */
7375   {
7376     sched_init_bbs ();
7377 
7378     auto_vec<basic_block> bbs (n_basic_blocks_for_fn (cfun));
7379     basic_block bb;
7380     FOR_EACH_BB_FN (bb, cfun)
7381       bbs.quick_push (bb);
7382     sched_init_luids (bbs);
7383     sched_deps_init (true);
7384     sched_extend_target ();
7385     haifa_init_h_i_d (bbs);
7386   }
7387 
7388   sched_init_only_bb = haifa_init_only_bb;
7389   sched_split_block = sched_split_block_1;
7390   sched_create_empty_bb = sched_create_empty_bb_1;
7391   haifa_recovery_bb_ever_added_p = false;
7392 
7393   nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
7394   before_recovery = 0;
7395   after_recovery = 0;
7396 
7397   modulo_ii = 0;
7398 }
7399 
7400 /* Finish work with the data specific to the Haifa scheduler.  */
7401 void
7402 haifa_sched_finish (void)
7403 {
7404   sched_create_empty_bb = NULL;
7405   sched_split_block = NULL;
7406   sched_init_only_bb = NULL;
7407 
7408   if (spec_info && spec_info->dump)
7409     {
7410       char c = reload_completed ? 'a' : 'b';
7411 
7412       fprintf (spec_info->dump,
7413 	       ";; %s:\n", current_function_name ());
7414 
7415       fprintf (spec_info->dump,
7416                ";; Procedure %cr-begin-data-spec motions == %d\n",
7417                c, nr_begin_data);
7418       fprintf (spec_info->dump,
7419                ";; Procedure %cr-be-in-data-spec motions == %d\n",
7420                c, nr_be_in_data);
7421       fprintf (spec_info->dump,
7422                ";; Procedure %cr-begin-control-spec motions == %d\n",
7423                c, nr_begin_control);
7424       fprintf (spec_info->dump,
7425                ";; Procedure %cr-be-in-control-spec motions == %d\n",
7426                c, nr_be_in_control);
7427     }
7428 
7429   scheduled_insns.release ();
7430 
7431   /* Finalize h_i_d, dependency caches, and luids for the whole
7432      function.  Target will be finalized in md_global_finish ().  */
7433   sched_deps_finish ();
7434   sched_finish_luids ();
7435   current_sched_info = NULL;
7436   insn_queue = NULL;
7437   sched_finish ();
7438 }
7439 
7440 /* Free global data used during insn scheduling.  This function works with
7441    the common data shared between the schedulers.  */
7442 
7443 void
7444 sched_finish (void)
7445 {
7446   haifa_finish_h_i_d ();
7447   free_global_sched_pressure_data ();
7448   free (curr_state);
7449 
7450   if (targetm.sched.finish_global)
7451     targetm.sched.finish_global (sched_dump, sched_verbose);
7452 
7453   end_alias_analysis ();
7454 
7455   regstat_free_calls_crossed ();
7456 
7457   dfa_finish ();
7458 }
7459 
7460 /* Free all delay_pair structures that were recorded.  */
7461 void
7462 free_delay_pairs (void)
7463 {
7464   if (delay_htab)
7465     {
7466       delay_htab->empty ();
7467       delay_htab_i2->empty ();
7468     }
7469 }
7470 
7471 /* Fix INSN_TICKs of the instructions in the current block as well as
7472    INSN_TICKs of their dependents.
7473    HEAD and TAIL are the begin and the end of the current scheduled block.  */
7474 static void
7475 fix_inter_tick (rtx_insn *head, rtx_insn *tail)
7476 {
7477   /* Set of instructions with corrected INSN_TICK.  */
7478   auto_bitmap processed;
7479   /* ??? It is doubtful whether we should assume that a cycle advance happens
7480      on basic block boundaries.  Basically, insns that are unconditionally
7481      ready at the start of the block are preferable to those which have
7482      a one-cycle dependency on an insn from the previous block.  */
7483   int next_clock = clock_var + 1;
7484 
7485   /* Iterates over scheduled instructions and fix their INSN_TICKs and
7486      INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
7487      across different blocks.  */
7488   for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
7489     {
7490       if (INSN_P (head))
7491 	{
7492 	  int tick;
7493 	  sd_iterator_def sd_it;
7494 	  dep_t dep;
7495 
7496 	  tick = INSN_TICK (head);
7497 	  gcc_assert (tick >= MIN_TICK);
7498 
7499 	  /* Fix INSN_TICK of instruction from just scheduled block.  */
7500 	  if (bitmap_set_bit (processed, INSN_LUID (head)))
7501 	    {
7502 	      tick -= next_clock;
7503 
7504 	      if (tick < MIN_TICK)
7505 		tick = MIN_TICK;
7506 
7507 	      INSN_TICK (head) = tick;
7508 	    }
7509 
7510 	  if (DEBUG_INSN_P (head))
7511 	    continue;
7512 
7513 	  FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
7514 	    {
7515 	      rtx_insn *next;
7516 
7517 	      next = DEP_CON (dep);
7518 	      tick = INSN_TICK (next);
7519 
7520 	      if (tick != INVALID_TICK
7521 		  /* If NEXT has its INSN_TICK calculated, fix it.
7522 		     If not - it will be properly calculated from
7523 		     scratch later in fix_tick_ready.  */
7524 		  && bitmap_set_bit (processed, INSN_LUID (next)))
7525 		{
7526 		  tick -= next_clock;
7527 
7528 		  if (tick < MIN_TICK)
7529 		    tick = MIN_TICK;
7530 
7531 		  if (tick > INTER_TICK (next))
7532 		    INTER_TICK (next) = tick;
7533 		  else
7534 		    tick = INTER_TICK (next);
7535 
7536 		  INSN_TICK (next) = tick;
7537 		}
7538 	    }
7539 	}
7540     }
7541 }
7542 
7543 /* Check if NEXT is ready to be added to the ready or queue list.
7544    If "yes", add it to the proper list.
7545    Returns:
7546       -1 - is not ready yet,
7547        0 - added to the ready list,
7548    0 < N - queued for N cycles.  */
7549 int
7550 try_ready (rtx_insn *next)
7551 {
7552   ds_t old_ts, new_ts;
7553 
7554   old_ts = TODO_SPEC (next);
7555 
7556   gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP | DEP_CONTROL | DEP_POSTPONED))
7557 	      && (old_ts == HARD_DEP
7558 		  || old_ts == DEP_POSTPONED
7559 		  || (old_ts & SPECULATIVE)
7560 		  || old_ts == DEP_CONTROL));
7561 
7562   new_ts = recompute_todo_spec (next, false);
7563 
7564   if (new_ts & (HARD_DEP | DEP_POSTPONED))
7565     gcc_assert (new_ts == old_ts
7566 		&& QUEUE_INDEX (next) == QUEUE_NOWHERE);
7567   else if (current_sched_info->new_ready)
7568     new_ts = current_sched_info->new_ready (next, new_ts);
7569 
7570   /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then the insn might
7571      have either its original pattern or a changed (speculative) one.  This
7572      is due to the ebb changing in region scheduling.
7573      * But if (old_ts & SPECULATIVE), then we are pretty sure that the insn
7574      has a speculative pattern.
7575 
7576      We can't assert (!(new_ts & HARD_DEP) || new_ts == old_ts) here because
7577      control-speculative NEXT could have been discarded by sched-rgn.c
7578      (the same case as when discarded by can_schedule_ready_p ()).  */
7579 
7580   if ((new_ts & SPECULATIVE)
7581       /* If (old_ts == new_ts), then (old_ts & SPECULATIVE) and we don't
7582 	 need to change anything.  */
7583       && new_ts != old_ts)
7584     {
7585       int res;
7586       rtx new_pat;
7587 
7588       gcc_assert ((new_ts & SPECULATIVE) && !(new_ts & ~SPECULATIVE));
7589 
7590       res = haifa_speculate_insn (next, new_ts, &new_pat);
7591 
7592       switch (res)
7593 	{
7594 	case -1:
7595 	  /* It would be nice to change the DEP_STATUS of all dependences
7596 	     which have ((DEP_STATUS & SPECULATIVE) == new_ts) to HARD_DEP,
7597 	     so we won't reanalyze anything.  */
7598 	  new_ts = HARD_DEP;
7599 	  break;
7600 
7601 	case 0:
7602 	  /* We follow the rule that every speculative insn
7603 	     has a non-null ORIG_PAT.  */
7604 	  if (!ORIG_PAT (next))
7605 	    ORIG_PAT (next) = PATTERN (next);
7606 	  break;
7607 
7608 	case 1:
7609 	  if (!ORIG_PAT (next))
7610 	    /* If we are going to overwrite the original pattern of the insn,
7611 	       save it.  */
7612 	    ORIG_PAT (next) = PATTERN (next);
7613 
7614 	  res = haifa_change_pattern (next, new_pat);
7615 	  gcc_assert (res);
7616 	  break;
7617 
7618 	default:
7619 	  gcc_unreachable ();
7620 	}
7621     }
7622 
7623   /* We need to restore pattern only if (new_ts == 0), because otherwise it is
7624      either correct (new_ts & SPECULATIVE),
7625      or we simply don't care (new_ts & HARD_DEP).  */
7626 
7627   gcc_assert (!ORIG_PAT (next)
7628 	      || !IS_SPECULATION_BRANCHY_CHECK_P (next));
7629 
7630   TODO_SPEC (next) = new_ts;
7631 
7632   if (new_ts & (HARD_DEP | DEP_POSTPONED))
7633     {
7634       /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
7635 	 control-speculative NEXT could have been discarded by sched-rgn.c
7636 	 (the same case as when discarded by can_schedule_ready_p ()).  */
7637       /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
7638 
7639       change_queue_index (next, QUEUE_NOWHERE);
7640 
7641       return -1;
7642     }
7643   else if (!(new_ts & BEGIN_SPEC)
7644 	   && ORIG_PAT (next) && PREDICATED_PAT (next) == NULL_RTX
7645 	   && !IS_SPECULATION_CHECK_P (next))
7646     /* We should change the pattern of every previously speculative
7647        instruction - and we determine whether NEXT was speculative by using
7648        the ORIG_PAT field.  Except in one case - speculation checks have
7649        ORIG_PAT set too, so skip them.  */
7650     {
7651       bool success = haifa_change_pattern (next, ORIG_PAT (next));
7652       gcc_assert (success);
7653       ORIG_PAT (next) = 0;
7654     }
7655 
7656   if (sched_verbose >= 2)
7657     {
7658       fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
7659                (*current_sched_info->print_insn) (next, 0));
7660 
7661       if (spec_info && spec_info->dump)
7662         {
7663           if (new_ts & BEGIN_DATA)
7664             fprintf (spec_info->dump, "; data-spec;");
7665           if (new_ts & BEGIN_CONTROL)
7666             fprintf (spec_info->dump, "; control-spec;");
7667           if (new_ts & BE_IN_CONTROL)
7668             fprintf (spec_info->dump, "; in-control-spec;");
7669         }
7670       if (TODO_SPEC (next) & DEP_CONTROL)
7671 	fprintf (sched_dump, " predicated");
7672       fprintf (sched_dump, "\n");
7673     }
7674 
7675   adjust_priority (next);
7676 
7677   return fix_tick_ready (next);
7678 }
7679 
7680 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list.  */
7681 static int
7682 fix_tick_ready (rtx_insn *next)
7683 {
7684   int tick, delay;
7685 
7686   if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
7687     {
7688       int full_p;
7689       sd_iterator_def sd_it;
7690       dep_t dep;
7691 
7692       tick = INSN_TICK (next);
7693       /* If tick is not equal to INVALID_TICK, then update
7694 	 INSN_TICK of NEXT with the most recent resolved dependence
7695 	 cost.  Otherwise, recalculate from scratch.  */
7696       full_p = (tick == INVALID_TICK);
7697 
7698       FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
7699         {
7700           rtx_insn *pro = DEP_PRO (dep);
7701           int tick1;
7702 
7703 	  gcc_assert (INSN_TICK (pro) >= MIN_TICK);
7704 
7705           tick1 = INSN_TICK (pro) + dep_cost (dep);
7706           if (tick1 > tick)
7707             tick = tick1;
7708 
7709 	  if (!full_p)
7710 	    break;
7711         }
7712     }
7713   else
7714     tick = -1;
7715 
7716   INSN_TICK (next) = tick;
7717 
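  /* A tick at or before the current cycle (or any tick when doing
     register-pressure or fusion scheduling) means NEXT goes straight to the
     ready list; otherwise it is queued for DELAY cycles.  */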
7718   delay = tick - clock_var;
7719   if (delay <= 0 || sched_pressure != SCHED_PRESSURE_NONE || sched_fusion)
7720     delay = QUEUE_READY;
7721 
7722   change_queue_index (next, delay);
7723 
7724   return delay;
7725 }
7726 
7727 /* Move NEXT to the proper queue list with (DELAY >= 1),
7728    or add it to the ready list (DELAY == QUEUE_READY),
7729    or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE).  */
7730 static void
7731 change_queue_index (rtx_insn *next, int delay)
7732 {
7733   int i = QUEUE_INDEX (next);
7734 
7735   gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
7736 	      && delay != 0);
7737   gcc_assert (i != QUEUE_SCHEDULED);
7738 
7739   if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
7740       || (delay < 0 && delay == i))
7741     /* We have nothing to do.  */
7742     return;
7743 
7744   /* Remove NEXT from wherever it is now.  */
7745   if (i == QUEUE_READY)
7746     ready_remove_insn (next);
7747   else if (i >= 0)
7748     queue_remove (next);
7749 
7750   /* Add it to the proper place.  */
7751   if (delay == QUEUE_READY)
7752     ready_add (readyp, next, false);
7753   else if (delay >= 1)
7754     queue_insn (next, delay, "change queue index");
7755 
7756   if (sched_verbose >= 2)
7757     {
7758       fprintf (sched_dump, ";;\t\ttick updated: insn %s",
7759 	       (*current_sched_info->print_insn) (next, 0));
7760 
7761       if (delay == QUEUE_READY)
7762 	fprintf (sched_dump, " into ready\n");
7763       else if (delay >= 1)
7764 	fprintf (sched_dump, " into queue with cost=%d\n", delay);
7765       else
7766 	fprintf (sched_dump, " removed from ready or queue lists\n");
7767     }
7768 }
7769 
7770 static int sched_ready_n_insns = -1;
7771 
7772 /* Initialize per region data structures.  */
7773 void
7774 sched_extend_ready_list (int new_sched_ready_n_insns)
7775 {
7776   int i;
7777 
7778   if (sched_ready_n_insns == -1)
7779     /* At the first call we need to initialize one more choice_stack
7780        entry.  */
7781     {
7782       i = 0;
7783       sched_ready_n_insns = 0;
7784       scheduled_insns.reserve (new_sched_ready_n_insns);
7785     }
7786   else
7787     i = sched_ready_n_insns + 1;
7788 
7789   ready.veclen = new_sched_ready_n_insns + issue_rate;
7790   ready.vec = XRESIZEVEC (rtx_insn *, ready.vec, ready.veclen);
7791 
7792   gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
7793 
7794   ready_try = (signed char *) xrecalloc (ready_try, new_sched_ready_n_insns,
7795 					 sched_ready_n_insns,
7796 					 sizeof (*ready_try));
7797 
7798   /* We allocate +1 element to save initial state in the choice_stack[0]
7799      entry.  */
7800   choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
7801 			     new_sched_ready_n_insns + 1);
7802 
7803   for (; i <= new_sched_ready_n_insns; i++)
7804     {
7805       choice_stack[i].state = xmalloc (dfa_state_size);
7806 
7807       if (targetm.sched.first_cycle_multipass_init)
7808 	targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
7809 						    .target_data));
7810     }
7811 
7812   sched_ready_n_insns = new_sched_ready_n_insns;
7813 }
7814 
7815 /* Free per region data structures.  */
7816 void
7817 sched_finish_ready_list (void)
7818 {
7819   int i;
7820 
7821   free (ready.vec);
7822   ready.vec = NULL;
7823   ready.veclen = 0;
7824 
7825   free (ready_try);
7826   ready_try = NULL;
7827 
7828   for (i = 0; i <= sched_ready_n_insns; i++)
7829     {
7830       if (targetm.sched.first_cycle_multipass_fini)
7831 	targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
7832 						    .target_data));
7833 
7834       free (choice_stack [i].state);
7835     }
7836   free (choice_stack);
7837   choice_stack = NULL;
7838 
7839   sched_ready_n_insns = -1;
7840 }
7841 
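/* LUID callback for objects that are not insns.  X must be a note or a
   label; such objects all get luid zero.  */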
7842 static int
7843 haifa_luid_for_non_insn (rtx x)
7844 {
7845   gcc_assert (NOTE_P (x) || LABEL_P (x));
7846 
7847   return 0;
7848 }
7849 
7850 /* Generates recovery code for INSN.  */
7851 static void
7852 generate_recovery_code (rtx_insn *insn)
7853 {
7854   if (TODO_SPEC (insn) & BEGIN_SPEC)
7855     begin_speculative_block (insn);
7856 
7857   /* Here we have an insn with no dependencies on
7858      instructions other than CHECK_SPEC ones.  */
7859 
7860   if (TODO_SPEC (insn) & BE_IN_SPEC)
7861     add_to_speculative_block (insn);
7862 }
7863 
7864 /* Helper function.
7865    Tries to add speculative dependencies of type FS between instructions
7866    in deps_list L and TWIN.  */
7867 static void
7868 process_insn_forw_deps_be_in_spec (rtx_insn *insn, rtx_insn *twin, ds_t fs)
7869 {
7870   sd_iterator_def sd_it;
7871   dep_t dep;
7872 
7873   FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
7874     {
7875       ds_t ds;
7876       rtx_insn *consumer;
7877 
7878       consumer = DEP_CON (dep);
7879 
7880       ds = DEP_STATUS (dep);
7881 
7882       if (/* If we want to create speculative dep.  */
7883 	  fs
7884 	  /* And we can do that because this is a true dep.  */
7885 	  && (ds & DEP_TYPES) == DEP_TRUE)
7886 	{
7887 	  gcc_assert (!(ds & BE_IN_SPEC));
7888 
7889 	  if (/* If this dep can be overcome with 'begin speculation'.  */
7890 	      ds & BEGIN_SPEC)
7891 	    /* Then we have a choice: keep the dep 'begin speculative'
7892 	       or transform it into 'be in speculative'.  */
7893 	    {
7894 	      if (/* In try_ready we assert that once an insn has become ready
7895 		     it can be removed from the ready (or queue) list only
7896 		     due to a backend decision.  Hence we can't let the
7897 		     probability of the speculative dep decrease.  */
7898 		  ds_weak (ds) <= ds_weak (fs))
7899 		{
7900 		  ds_t new_ds;
7901 
7902 		  new_ds = (ds & ~BEGIN_SPEC) | fs;
7903 
7904 		  if (/* consumer can 'be in speculative'.  */
7905 		      sched_insn_is_legitimate_for_speculation_p (consumer,
7906 								  new_ds))
7907 		    /* Transform it to be in speculative.  */
7908 		    ds = new_ds;
7909 		}
7910 	    }
7911 	  else
7912 	    /* Mark the dep as 'be in speculative'.  */
7913 	    ds |= fs;
7914 	}
7915 
7916       {
7917 	dep_def _new_dep, *new_dep = &_new_dep;
7918 
7919 	init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
7920 	sd_add_dep (new_dep, false);
7921       }
7922     }
7923 }
7924 
7925 /* Generates recovery code for BEGIN speculative INSN.  */
7926 static void
7927 begin_speculative_block (rtx_insn *insn)
7928 {
7929   if (TODO_SPEC (insn) & BEGIN_DATA)
7930     nr_begin_data++;
7931   if (TODO_SPEC (insn) & BEGIN_CONTROL)
7932     nr_begin_control++;
7933 
7934   create_check_block_twin (insn, false);
7935 
7936   TODO_SPEC (insn) &= ~BEGIN_SPEC;
7937 }
7938 
7939 static void haifa_init_insn (rtx_insn *);
7940 
7941 /* Generates recovery code for BE_IN speculative INSN.  */
7942 static void
7943 add_to_speculative_block (rtx_insn *insn)
7944 {
7945   ds_t ts;
7946   sd_iterator_def sd_it;
7947   dep_t dep;
7948   auto_vec<rtx_insn *, 10> twins;
7949 
7950   ts = TODO_SPEC (insn);
7951   gcc_assert (!(ts & ~BE_IN_SPEC));
7952 
7953   if (ts & BE_IN_DATA)
7954     nr_be_in_data++;
7955   if (ts & BE_IN_CONTROL)
7956     nr_be_in_control++;
7957 
7958   TODO_SPEC (insn) &= ~BE_IN_SPEC;
7959   gcc_assert (!TODO_SPEC (insn));
7960 
7961   DONE_SPEC (insn) |= ts;
7962 
7963   /* First we convert all simple checks to branchy.  */
7964   for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7965        sd_iterator_cond (&sd_it, &dep);)
7966     {
7967       rtx_insn *check = DEP_PRO (dep);
7968 
7969       if (IS_SPECULATION_SIMPLE_CHECK_P (check))
7970 	{
7971 	  create_check_block_twin (check, true);
7972 
7973 	  /* Restart search.  */
7974 	  sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7975 	}
7976       else
7977 	/* Continue search.  */
7978 	sd_iterator_next (&sd_it);
7979     }
7980 
7981   auto_vec<rtx_insn *> priorities_roots;
7982   clear_priorities (insn, &priorities_roots);
7983 
7984   while (1)
7985     {
7986       rtx_insn *check, *twin;
7987       basic_block rec;
7988 
7989       /* Get the first backward dependency of INSN.  */
7990       sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7991       if (!sd_iterator_cond (&sd_it, &dep))
7992 	/* INSN has no backward dependencies left.  */
7993 	break;
7994 
7995       gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
7996 		  && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
7997 		  && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
7998 
7999       check = DEP_PRO (dep);
8000 
8001       gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
8002 		  && QUEUE_INDEX (check) == QUEUE_NOWHERE);
8003 
8004       rec = BLOCK_FOR_INSN (check);
8005 
8006       twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
8007       haifa_init_insn (twin);
8008 
8009       sd_copy_back_deps (twin, insn, true);
8010 
8011       if (sched_verbose && spec_info->dump)
8012         /* INSN_BB (insn) isn't determined for twin insns yet.
8013            So we can't use current_sched_info->print_insn.  */
8014         fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
8015                  INSN_UID (twin), rec->index);
8016 
8017       twins.safe_push (twin);
8018 
8019       /* Add dependences between TWIN and all appropriate
8020 	 instructions from REC.  */
8021       FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
8022 	{
8023 	  rtx_insn *pro = DEP_PRO (dep);
8024 
8025 	  gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
8026 
8027 	  /* INSN might have dependencies from the instructions from
8028 	     several recovery blocks.  At this iteration we process those
8029 	     producers that reside in REC.  */
8030 	  if (BLOCK_FOR_INSN (pro) == rec)
8031 	    {
8032 	      dep_def _new_dep, *new_dep = &_new_dep;
8033 
8034 	      init_dep (new_dep, pro, twin, REG_DEP_TRUE);
8035 	      sd_add_dep (new_dep, false);
8036 	    }
8037 	}
8038 
8039       process_insn_forw_deps_be_in_spec (insn, twin, ts);
8040 
8041       /* Remove all dependencies between INSN and insns in REC.  */
8042       for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
8043 	   sd_iterator_cond (&sd_it, &dep);)
8044 	{
8045 	  rtx_insn *pro = DEP_PRO (dep);
8046 
8047 	  if (BLOCK_FOR_INSN (pro) == rec)
8048 	    sd_delete_dep (sd_it);
8049 	  else
8050 	    sd_iterator_next (&sd_it);
8051 	}
8052     }
8053 
8054   /* We couldn't have added the dependencies between INSN and TWINS earlier
8055      because that would make TWINS appear in the INSN_BACK_DEPS (INSN).  */
8056   unsigned int i;
8057   rtx_insn *twin;
8058   FOR_EACH_VEC_ELT_REVERSE (twins, i, twin)
8059     {
8060       dep_def _new_dep, *new_dep = &_new_dep;
8061 
8062       init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
8063       sd_add_dep (new_dep, false);
8064     }
8065 
8066   calc_priorities (priorities_roots);
8067 }
8068 
8069 /* Extend the array pointed to by P, filling the newly added part with zeros.  */
8070 void *
8071 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
8072 {
8073   gcc_assert (new_nmemb >= old_nmemb);
8074   p = XRESIZEVAR (void, p, new_nmemb * size);
8075   memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
8076   return p;
8077 }
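/* A minimal usage sketch (hypothetical caller, not part of this pass):

     tbl = (int *) xrecalloc (tbl, new_n, old_n, sizeof (int));

   keeps the first OLD_N elements of TBL and zeroes the NEW_N - OLD_N
   elements appended at the end.  */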
8078 
8079 /* Helper function.
8080    Find fallthru edge from PRED.  */
8081 edge
8082 find_fallthru_edge_from (basic_block pred)
8083 {
8084   edge e;
8085   basic_block succ;
8086 
8087   succ = pred->next_bb;
8088   gcc_assert (succ->prev_bb == pred);
8089 
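  /* Look for the fallthru edge on whichever of the two edge lists is
     shorter: PRED's successors or SUCC's predecessors (see the asserts
     below for the expected endpoints).  */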
8090   if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
8091     {
8092       e = find_fallthru_edge (pred->succs);
8093 
8094       if (e)
8095 	{
8096 	  gcc_assert (e->dest == succ || e->dest->index == EXIT_BLOCK);
8097 	  return e;
8098 	}
8099     }
8100   else
8101     {
8102       e = find_fallthru_edge (succ->preds);
8103 
8104       if (e)
8105 	{
8106 	  gcc_assert (e->src == pred);
8107 	  return e;
8108 	}
8109     }
8110 
8111   return NULL;
8112 }
8113 
8114 /* Extend per basic block data structures.  */
8115 static void
8116 sched_extend_bb (void)
8117 {
8118   /* The following is done to keep current_sched_info->next_tail non null.  */
8119   rtx_insn *end = BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
8120   rtx_insn *insn = DEBUG_INSN_P (end) ? prev_nondebug_insn (end) : end;
8121   if (NEXT_INSN (end) == 0
8122       || (!NOTE_P (insn)
8123 	  && !LABEL_P (insn)
8124 	  /* Don't emit a NOTE if it would end up before a BARRIER.  */
8125 	  && !BARRIER_P (next_nondebug_insn (end))))
8126     {
8127       rtx_note *note = emit_note_after (NOTE_INSN_DELETED, end);
8128       /* Make note appear outside BB.  */
8129       set_block_for_insn (note, NULL);
8130       BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb) = end;
8131     }
8132 }
8133 
8134 /* Init per basic block data structures.  */
8135 void
8136 sched_init_bbs (void)
8137 {
8138   sched_extend_bb ();
8139 }
8140 
8141 /* Initialize BEFORE_RECOVERY variable.  */
8142 static void
8143 init_before_recovery (basic_block *before_recovery_ptr)
8144 {
8145   basic_block last;
8146   edge e;
8147 
8148   last = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
8149   e = find_fallthru_edge_from (last);
8150 
8151   if (e)
8152     {
8153       /* We create two basic blocks:
8154          1. Single instruction block is inserted right after E->SRC
8155          and has jump to
8156          2. Empty block right before EXIT_BLOCK.
8157          Between these two blocks recovery blocks will be emitted.  */
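      /* Illustrative layout after the fix-up (cf. the dump below):

           LAST ->> SINGLE -> EMPTY ->> EXIT

         where '->>' denotes a fallthru edge, SINGLE ends with a jump to
         EMPTY, and recovery blocks will later be emitted between SINGLE
         and EMPTY.  */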
8158 
8159       basic_block single, empty;
8160 
8161       /* If the fallthrough edge to exit we've found is from the block we've
8162 	 created before, don't do anything more.  */
8163       if (last == after_recovery)
8164 	return;
8165 
8166       adding_bb_to_current_region_p = false;
8167 
8168       single = sched_create_empty_bb (last);
8169       empty = sched_create_empty_bb (single);
8170 
8171       /* Add new blocks to the root loop.  */
8172       if (current_loops != NULL)
8173 	{
8174 	  add_bb_to_loop (single, (*current_loops->larray)[0]);
8175 	  add_bb_to_loop (empty, (*current_loops->larray)[0]);
8176 	}
8177 
8178       single->count = last->count;
8179       empty->count = last->count;
8180       BB_COPY_PARTITION (single, last);
8181       BB_COPY_PARTITION (empty, last);
8182 
8183       redirect_edge_succ (e, single);
8184       make_single_succ_edge (single, empty, 0);
8185       make_single_succ_edge (empty, EXIT_BLOCK_PTR_FOR_FN (cfun),
8186 			     EDGE_FALLTHRU);
8187 
8188       rtx_code_label *label = block_label (empty);
8189       rtx_jump_insn *x = emit_jump_insn_after (targetm.gen_jump (label),
8190 					       BB_END (single));
8191       JUMP_LABEL (x) = label;
8192       LABEL_NUSES (label)++;
8193       haifa_init_insn (x);
8194 
8195       emit_barrier_after (x);
8196 
8197       sched_init_only_bb (empty, NULL);
8198       sched_init_only_bb (single, NULL);
8199       sched_extend_bb ();
8200 
8201       adding_bb_to_current_region_p = true;
8202       before_recovery = single;
8203       after_recovery = empty;
8204 
8205       if (before_recovery_ptr)
8206         *before_recovery_ptr = before_recovery;
8207 
8208       if (sched_verbose >= 2 && spec_info->dump)
8209         fprintf (spec_info->dump,
8210 		 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
8211                  last->index, single->index, empty->index);
8212     }
8213   else
8214     before_recovery = last;
8215 }
8216 
8217 /* Returns new recovery block.  */
8218 basic_block
8219 sched_create_recovery_block (basic_block *before_recovery_ptr)
8220 {
8221   rtx_insn *barrier;
8222   basic_block rec;
8223 
8224   haifa_recovery_bb_recently_added_p = true;
8225   haifa_recovery_bb_ever_added_p = true;
8226 
8227   init_before_recovery (before_recovery_ptr);
8228 
8229   barrier = get_last_bb_insn (before_recovery);
8230   gcc_assert (BARRIER_P (barrier));
8231 
8232   rtx_insn *label = emit_label_after (gen_label_rtx (), barrier);
8233 
8234   rec = create_basic_block (label, label, before_recovery);
8235 
8236   /* A recovery block always ends with an unconditional jump.  */
8237   emit_barrier_after (BB_END (rec));
8238 
8239   if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
8240     BB_SET_PARTITION (rec, BB_COLD_PARTITION);
8241 
8242   if (sched_verbose && spec_info->dump)
8243     fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
8244              rec->index);
8245 
8246   return rec;
8247 }
8248 
8249 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
8250    and emit necessary jumps.  */
8251 void
8252 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
8253 			     basic_block second_bb)
8254 {
8255   int edge_flags;
8256 
8257   /* This is fixing of incoming edge.  */
8258   /* ??? Which other flags should be specified?  */
8259   if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
8260     /* Partition type is the same, if it is "unpartitioned".  */
8261     edge_flags = EDGE_CROSSING;
8262   else
8263     edge_flags = 0;
8264 
8265   edge e2 = single_succ_edge (first_bb);
8266   edge e = make_edge (first_bb, rec, edge_flags);
8267 
8268   /* TODO: The actual probability can be determined and is computed as
8269      'todo_spec' variable in create_check_block_twin and
8270      in sel-sched.c `check_ds' in create_speculation_check.  */
8271   e->probability = profile_probability::very_unlikely ();
8272   rec->count = e->count ();
8273   e2->probability = e->probability.invert ();
8274 
8275   rtx_code_label *label = block_label (second_bb);
8276   rtx_jump_insn *jump = emit_jump_insn_after (targetm.gen_jump (label),
8277 					      BB_END (rec));
8278   JUMP_LABEL (jump) = label;
8279   LABEL_NUSES (label)++;
8280 
8281   if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
8282     /* Partition type is the same, if it is "unpartitioned".  */
8283     {
8284       /* Rewritten from cfgrtl.c.  */
8285       if (crtl->has_bb_partition && targetm_common.have_named_sections)
8286 	{
8287 	  /* We don't need the same note for the check because
8288 	     any_condjump_p (check) == true.  */
8289 	  CROSSING_JUMP_P (jump) = 1;
8290 	}
8291       edge_flags = EDGE_CROSSING;
8292     }
8293   else
8294     edge_flags = 0;
8295 
8296   make_single_succ_edge (rec, second_bb, edge_flags);
8297   if (dom_info_available_p (CDI_DOMINATORS))
8298     set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
8299 }
8300 
8301 /* This function creates recovery code for INSN.  If MUTATE_P is nonzero,
8302    INSN is a simple check that should be converted to a branchy one.  */
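/* Outline of the transformation performed below: create a recovery block
   when MUTATE_P is set or the target requires one, emit the speculative
   check CHECK right before INSN, emit TWIN (a copy of INSN's original
   pattern) into the recovery block (when no recovery block is used CHECK
   itself plays the role of TWIN), move INSN's backward dependencies to
   CHECK/TWIN and its forward dependencies to TWIN, fix the CFG and,
   unless MUTATE_P, recompute priorities.  */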
8303 static void
8304 create_check_block_twin (rtx_insn *insn, bool mutate_p)
8305 {
8306   basic_block rec;
8307   rtx_insn *label, *check, *twin;
8308   rtx check_pat;
8309   ds_t fs;
8310   sd_iterator_def sd_it;
8311   dep_t dep;
8312   dep_def _new_dep, *new_dep = &_new_dep;
8313   ds_t todo_spec;
8314 
8315   gcc_assert (ORIG_PAT (insn) != NULL_RTX);
8316 
8317   if (!mutate_p)
8318     todo_spec = TODO_SPEC (insn);
8319   else
8320     {
8321       gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
8322 		  && (TODO_SPEC (insn) & SPECULATIVE) == 0);
8323 
8324       todo_spec = CHECK_SPEC (insn);
8325     }
8326 
8327   todo_spec &= SPECULATIVE;
8328 
8329   /* Create recovery block.  */
8330   if (mutate_p || targetm.sched.needs_block_p (todo_spec))
8331     {
8332       rec = sched_create_recovery_block (NULL);
8333       label = BB_HEAD (rec);
8334     }
8335   else
8336     {
8337       rec = EXIT_BLOCK_PTR_FOR_FN (cfun);
8338       label = NULL;
8339     }
8340 
8341   /* Emit CHECK.  */
8342   check_pat = targetm.sched.gen_spec_check (insn, label, todo_spec);
8343 
8344   if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8345     {
8346       /* To have mem_reg alive at the beginning of second_bb,
8347 	 we emit check BEFORE insn, so insn after splitting
8348 	 insn will be at the beginning of second_bb, which will
8349 	 provide us with the correct life information.  */
8350       check = emit_jump_insn_before (check_pat, insn);
8351       JUMP_LABEL (check) = label;
8352       LABEL_NUSES (label)++;
8353     }
8354   else
8355     check = emit_insn_before (check_pat, insn);
8356 
8357   /* Extend data structures.  */
8358   haifa_init_insn (check);
8359 
8360   /* CHECK is being added to current region.  Extend ready list.  */
8361   gcc_assert (sched_ready_n_insns != -1);
8362   sched_extend_ready_list (sched_ready_n_insns + 1);
8363 
8364   if (current_sched_info->add_remove_insn)
8365     current_sched_info->add_remove_insn (insn, 0);
8366 
8367   RECOVERY_BLOCK (check) = rec;
8368 
8369   if (sched_verbose && spec_info->dump)
8370     fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
8371              (*current_sched_info->print_insn) (check, 0));
8372 
8373   gcc_assert (ORIG_PAT (insn));
8374 
8375   /* Initialize TWIN (twin is a duplicate of original instruction
8376      in the recovery block).  */
8377   if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8378     {
8379       sd_iterator_def sd_it;
8380       dep_t dep;
8381 
8382       FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
8383 	if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
8384 	  {
8385 	    struct _dep _dep2, *dep2 = &_dep2;
8386 
8387 	    init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
8388 
8389 	    sd_add_dep (dep2, true);
8390 	  }
8391 
8392       twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
8393       haifa_init_insn (twin);
8394 
8395       if (sched_verbose && spec_info->dump)
8396 	/* INSN_BB (insn) isn't determined for twin insns yet.
8397 	   So we can't use current_sched_info->print_insn.  */
8398 	fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
8399 		 INSN_UID (twin), rec->index);
8400     }
8401   else
8402     {
8403       ORIG_PAT (check) = ORIG_PAT (insn);
8404       HAS_INTERNAL_DEP (check) = 1;
8405       twin = check;
8406       /* ??? We probably should change all OUTPUT dependencies to
8407 	 (TRUE | OUTPUT).  */
8408     }
8409 
8410   /* Copy all resolved back dependencies of INSN to TWIN.  This will
8411      provide correct value for INSN_TICK (TWIN).  */
8412   sd_copy_back_deps (twin, insn, true);
8413 
8414   if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8415     /* In case of branchy check, fix CFG.  */
8416     {
8417       basic_block first_bb, second_bb;
8418       rtx_insn *jump;
8419 
8420       first_bb = BLOCK_FOR_INSN (check);
8421       second_bb = sched_split_block (first_bb, check);
8422 
8423       sched_create_recovery_edges (first_bb, rec, second_bb);
8424 
8425       sched_init_only_bb (second_bb, first_bb);
8426       sched_init_only_bb (rec, EXIT_BLOCK_PTR_FOR_FN (cfun));
8427 
8428       jump = BB_END (rec);
8429       haifa_init_insn (jump);
8430     }
8431 
8432   /* Move backward dependences from INSN to CHECK and
8433      move forward dependences from INSN to TWIN.  */
8434 
8435   /* First, create dependencies between INSN's producers and CHECK & TWIN.  */
8436   FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
8437     {
8438       rtx_insn *pro = DEP_PRO (dep);
8439       ds_t ds;
8440 
8441       /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
8442 	 check --TRUE--> producer  ??? or ANTI ???
8443 	 twin  --TRUE--> producer
8444 	 twin  --ANTI--> check
8445 
8446 	 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
8447 	 check --ANTI--> producer
8448 	 twin  --ANTI--> producer
8449 	 twin  --ANTI--> check
8450 
8451 	 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
8452 	 check ~~TRUE~~> producer
8453 	 twin  ~~TRUE~~> producer
8454 	 twin  --ANTI--> check  */
8455 
8456       ds = DEP_STATUS (dep);
8457 
8458       if (ds & BEGIN_SPEC)
8459 	{
8460 	  gcc_assert (!mutate_p);
8461 	  ds &= ~BEGIN_SPEC;
8462 	}
8463 
8464       init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
8465       sd_add_dep (new_dep, false);
8466 
8467       if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8468 	{
8469 	  DEP_CON (new_dep) = twin;
8470 	  sd_add_dep (new_dep, false);
8471 	}
8472     }
8473 
8474   /* Second, remove backward dependencies of INSN.  */
8475   for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
8476        sd_iterator_cond (&sd_it, &dep);)
8477     {
8478       if ((DEP_STATUS (dep) & BEGIN_SPEC)
8479 	  || mutate_p)
8480 	/* We can delete this dep because we overcome it with
8481 	   BEGIN_SPECULATION.  */
8482 	sd_delete_dep (sd_it);
8483       else
8484 	sd_iterator_next (&sd_it);
8485     }
8486 
8487   /* Future Speculations.  Determine what BE_IN speculations will be like.  */
8488   fs = 0;
8489 
8490   /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
8491      here.  */
8492 
8493   gcc_assert (!DONE_SPEC (insn));
8494 
8495   if (!mutate_p)
8496     {
8497       ds_t ts = TODO_SPEC (insn);
8498 
8499       DONE_SPEC (insn) = ts & BEGIN_SPEC;
8500       CHECK_SPEC (check) = ts & BEGIN_SPEC;
8501 
8502       /* Luckiness of future speculations solely depends upon initial
8503 	 BEGIN speculation.  */
8504       if (ts & BEGIN_DATA)
8505 	fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
8506       if (ts & BEGIN_CONTROL)
8507 	fs = set_dep_weak (fs, BE_IN_CONTROL,
8508 			   get_dep_weak (ts, BEGIN_CONTROL));
8509     }
8510   else
8511     CHECK_SPEC (check) = CHECK_SPEC (insn);
8512 
8513   /* Future speculations: call the helper.  */
8514   process_insn_forw_deps_be_in_spec (insn, twin, fs);
8515 
8516   if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8517     {
8518       /* Which types of dependencies we should use here is,
8519 	 generally, a machine-dependent question...  But, for now,
8520 	 it is not.  */
8521 
8522       if (!mutate_p)
8523 	{
8524 	  init_dep (new_dep, insn, check, REG_DEP_TRUE);
8525 	  sd_add_dep (new_dep, false);
8526 
8527 	  init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
8528 	  sd_add_dep (new_dep, false);
8529 	}
8530       else
8531 	{
8532 	  if (spec_info->dump)
8533 	    fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
8534 		     (*current_sched_info->print_insn) (insn, 0));
8535 
8536 	  /* Remove all dependencies of the INSN.  */
8537 	  {
8538 	    sd_it = sd_iterator_start (insn, (SD_LIST_FORW
8539 					      | SD_LIST_BACK
8540 					      | SD_LIST_RES_BACK));
8541 	    while (sd_iterator_cond (&sd_it, &dep))
8542 	      sd_delete_dep (sd_it);
8543 	  }
8544 
8545 	  /* If former check (INSN) already was moved to the ready (or queue)
8546 	     list, add new check (CHECK) there too.  */
8547 	  if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
8548 	    try_ready (check);
8549 
8550 	  /* Remove old check from instruction stream and free its
8551 	     data.  */
8552 	  sched_remove_insn (insn);
8553 	}
8554 
8555       init_dep (new_dep, check, twin, REG_DEP_ANTI);
8556       sd_add_dep (new_dep, false);
8557     }
8558   else
8559     {
8560       init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
8561       sd_add_dep (new_dep, false);
8562     }
8563 
8564   if (!mutate_p)
8565     /* Fix priorities.  If MUTATE_P is nonzero, this is not necessary,
8566        because it'll be done later in add_to_speculative_block.  */
8567     {
8568       auto_vec<rtx_insn *> priorities_roots;
8569 
8570       clear_priorities (twin, &priorities_roots);
8571       calc_priorities (priorities_roots);
8572     }
8573 }
8574 
8575 /* Removes dependencies between instructions in the recovery block REC
8576    and the usual region instructions.  It keeps inner dependences so it
8577    won't be necessary to recompute them.  */
8578 static void
8579 fix_recovery_deps (basic_block rec)
8580 {
8581   rtx_insn *note, *insn, *jump;
8582   auto_vec<rtx_insn *, 10> ready_list;
8583   auto_bitmap in_ready;
8584 
8585   /* NOTE - a basic block note.  */
8586   note = NEXT_INSN (BB_HEAD (rec));
8587   gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
8588   insn = BB_END (rec);
8589   gcc_assert (JUMP_P (insn));
8590   insn = PREV_INSN (insn);
8591 
8592   do
8593     {
8594       sd_iterator_def sd_it;
8595       dep_t dep;
8596 
8597       for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
8598 	   sd_iterator_cond (&sd_it, &dep);)
8599 	{
8600 	  rtx_insn *consumer = DEP_CON (dep);
8601 
8602 	  if (BLOCK_FOR_INSN (consumer) != rec)
8603 	    {
8604 	      sd_delete_dep (sd_it);
8605 
8606 	      if (bitmap_set_bit (in_ready, INSN_LUID (consumer)))
8607 		ready_list.safe_push (consumer);
8608 	    }
8609 	  else
8610 	    {
8611 	      gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
8612 
8613 	      sd_iterator_next (&sd_it);
8614 	    }
8615 	}
8616 
8617       insn = PREV_INSN (insn);
8618     }
8619   while (insn != note);
8620 
8621   /* Try to add instructions to the ready or queue list.  */
8622   unsigned int i;
8623   rtx_insn *temp;
8624   FOR_EACH_VEC_ELT_REVERSE (ready_list, i, temp)
8625     try_ready (temp);
8626 
8627   /* Fixing jump's dependences.  */
8628   insn = BB_HEAD (rec);
8629   jump = BB_END (rec);
8630 
8631   gcc_assert (LABEL_P (insn));
8632   insn = NEXT_INSN (insn);
8633 
8634   gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
8635   add_jump_dependencies (insn, jump);
8636 }
8637 
8638 /* Change pattern of INSN to NEW_PAT.  Invalidate cached haifa
8639    instruction data.  */
8640 static bool
8641 haifa_change_pattern (rtx_insn *insn, rtx new_pat)
8642 {
8643   int t;
8644 
8645   t = validate_change (insn, &PATTERN (insn), new_pat, 0);
8646   if (!t)
8647     return false;
8648 
8649   update_insn_after_change (insn);
8650   return true;
8651 }
8652 
8653 /* -1 - can't speculate,
8654    0 - for speculation with REQUEST mode it is OK to use
8655    current instruction pattern,
8656    1 - need to change pattern for *NEW_PAT to be speculative.  */
8657 int
8658 sched_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
8659 {
8660   gcc_assert (current_sched_info->flags & DO_SPECULATION
8661               && (request & SPECULATIVE)
8662 	      && sched_insn_is_legitimate_for_speculation_p (insn, request));
8663 
8664   if ((request & spec_info->mask) != request)
8665     return -1;
8666 
8667   if (request & BE_IN_SPEC
8668       && !(request & BEGIN_SPEC))
8669     return 0;
8670 
8671   return targetm.sched.speculate_insn (insn, request, new_pat);
8672 }
8673 
8674 static int
8675 haifa_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
8676 {
8677   gcc_assert (sched_deps_info->generate_spec_deps
8678 	      && !IS_SPECULATION_CHECK_P (insn));
8679 
8680   if (HAS_INTERNAL_DEP (insn)
8681       || SCHED_GROUP_P (insn))
8682     return -1;
8683 
8684   return sched_speculate_insn (insn, request, new_pat);
8685 }
8686 
8687 /* Print some information about block BB, which starts with HEAD and
8688    ends with TAIL, before scheduling it.
8689    I is zero if the scheduler is about to start with a fresh ebb.  */
8690 static void
8691 dump_new_block_header (int i, basic_block bb, rtx_insn *head, rtx_insn *tail)
8692 {
8693   if (!i)
8694     fprintf (sched_dump,
8695 	     ";;   ======================================================\n");
8696   else
8697     fprintf (sched_dump,
8698 	     ";;   =====================ADVANCING TO=====================\n");
8699   fprintf (sched_dump,
8700 	   ";;   -- basic block %d from %d to %d -- %s reload\n",
8701 	   bb->index, INSN_UID (head), INSN_UID (tail),
8702 	   (reload_completed ? "after" : "before"));
8703   fprintf (sched_dump,
8704 	   ";;   ======================================================\n");
8705   fprintf (sched_dump, "\n");
8706 }
8707 
8708 /* Unlink basic block notes and labels and save them, so they
8709    can be easily restored.  We unlink basic block notes in the EBB to
8710    provide backward compatibility with the previous code, as target backends
8711    assume that there'll be only instructions between
8712    current_sched_info->{head and tail}.  We restore these notes as soon
8713    as we can.
8714    FIRST (LAST) is the first (last) basic block in the ebb.
8715    NB: In the usual case (FIRST == LAST) nothing is really done.  */
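/* Concretely, for every block after the first one the block's label (if
   any) and its basic block note are spliced out of the insn chain, and the
   first of those insns is remembered in bb_header[bb->index], from where
   restore_bb_notes puts it back.  */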
8716 void
8717 unlink_bb_notes (basic_block first, basic_block last)
8718 {
8719   /* We DON'T unlink basic block notes of the first block in the ebb.  */
8720   if (first == last)
8721     return;
8722 
8723   bb_header = XNEWVEC (rtx_insn *, last_basic_block_for_fn (cfun));
8724 
8725   /* Make a sentinel.  */
8726   if (last->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
8727     bb_header[last->next_bb->index] = 0;
8728 
8729   first = first->next_bb;
8730   do
8731     {
8732       rtx_insn *prev, *label, *note, *next;
8733 
8734       label = BB_HEAD (last);
8735       if (LABEL_P (label))
8736 	note = NEXT_INSN (label);
8737       else
8738 	note = label;
8739       gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
8740 
8741       prev = PREV_INSN (label);
8742       next = NEXT_INSN (note);
8743       gcc_assert (prev && next);
8744 
8745       SET_NEXT_INSN (prev) = next;
8746       SET_PREV_INSN (next) = prev;
8747 
8748       bb_header[last->index] = label;
8749 
8750       if (last == first)
8751 	break;
8752 
8753       last = last->prev_bb;
8754     }
8755   while (1);
8756 }
8757 
8758 /* Restore basic block notes.
8759    FIRST is the first basic block in the ebb.  */
8760 static void
8761 restore_bb_notes (basic_block first)
8762 {
8763   if (!bb_header)
8764     return;
8765 
8766   /* We DON'T unlink basic block notes of the first block in the ebb.  */
8767   first = first->next_bb;
8768   /* Remember: FIRST is actually the second basic block in the ebb.  */
8769 
8770   while (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
8771 	 && bb_header[first->index])
8772     {
8773       rtx_insn *prev, *label, *note, *next;
8774 
8775       label = bb_header[first->index];
8776       prev = PREV_INSN (label);
8777       next = NEXT_INSN (prev);
8778 
8779       if (LABEL_P (label))
8780 	note = NEXT_INSN (label);
8781       else
8782 	note = label;
8783       gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
8784 
8785       bb_header[first->index] = 0;
8786 
8787       SET_NEXT_INSN (prev) = label;
8788       SET_NEXT_INSN (note) = next;
8789       SET_PREV_INSN (next) = note;
8790 
8791       first = first->next_bb;
8792     }
8793 
8794   free (bb_header);
8795   bb_header = 0;
8796 }
8797 
8798 /* Helper function.
8799    Fix CFG after both in- and inter-block movement of
8800    control_flow_insn_p JUMP.  */
8801 static void
8802 fix_jump_move (rtx_insn *jump)
8803 {
8804   basic_block bb, jump_bb, jump_bb_next;
8805 
8806   bb = BLOCK_FOR_INSN (PREV_INSN (jump));
8807   jump_bb = BLOCK_FOR_INSN (jump);
8808   jump_bb_next = jump_bb->next_bb;
8809 
8810   gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
8811 	      || IS_SPECULATION_BRANCHY_CHECK_P (jump));
8812 
8813   if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
8814     /* if jump_bb_next is not empty.  */
8815     BB_END (jump_bb) = BB_END (jump_bb_next);
8816 
8817   if (BB_END (bb) != PREV_INSN (jump))
8818     /* Then there are instructions after jump that should be placed
8819        in jump_bb_next.  */
8820     BB_END (jump_bb_next) = BB_END (bb);
8821   else
8822     /* Otherwise jump_bb_next is empty.  */
8823     BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
8824 
8825   /* To make assertion in move_insn happy.  */
8826   BB_END (bb) = PREV_INSN (jump);
8827 
8828   update_bb_for_insn (jump_bb_next);
8829 }
8830 
8831 /* Fix CFG after interblock movement of control_flow_insn_p JUMP.  */
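/* Concretely, JUMP_BB_NEXT is unlinked and relinked right after BB, and the
   successor vectors are rotated so that BB takes JUMP_BB's successors,
   JUMP_BB takes JUMP_BB_NEXT's, and JUMP_BB_NEXT takes BB's old ones
   (see move_succs below).  */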
8832 static void
8833 move_block_after_check (rtx_insn *jump)
8834 {
8835   basic_block bb, jump_bb, jump_bb_next;
8836   vec<edge, va_gc> *t;
8837 
8838   bb = BLOCK_FOR_INSN (PREV_INSN (jump));
8839   jump_bb = BLOCK_FOR_INSN (jump);
8840   jump_bb_next = jump_bb->next_bb;
8841 
8842   update_bb_for_insn (jump_bb);
8843 
8844   gcc_assert (IS_SPECULATION_CHECK_P (jump)
8845 	      || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
8846 
8847   unlink_block (jump_bb_next);
8848   link_block (jump_bb_next, bb);
8849 
8850   t = bb->succs;
8851   bb->succs = 0;
8852   move_succs (&(jump_bb->succs), bb);
8853   move_succs (&(jump_bb_next->succs), jump_bb);
8854   move_succs (&t, jump_bb_next);
8855 
8856   df_mark_solutions_dirty ();
8857 
8858   common_sched_info->fix_recovery_cfg
8859     (bb->index, jump_bb->index, jump_bb_next->index);
8860 }
8861 
8862 /* Helper function for move_block_after_check.
8863    This function attaches the edge vector pointed to by SUCCSP to
8864    block TO.  */
8865 static void
8866 move_succs (vec<edge, va_gc> **succsp, basic_block to)
8867 {
8868   edge e;
8869   edge_iterator ei;
8870 
8871   gcc_assert (to->succs == 0);
8872 
8873   to->succs = *succsp;
8874 
8875   FOR_EACH_EDGE (e, ei, to->succs)
8876     e->src = to;
8877 
8878   *succsp = 0;
8879 }
8880 
8881 /* Remove INSN from the instruction stream.
8882    INSN should not have any dependencies.  */
8883 static void
8884 sched_remove_insn (rtx_insn *insn)
8885 {
8886   sd_finish_insn (insn);
8887 
8888   change_queue_index (insn, QUEUE_NOWHERE);
8889   current_sched_info->add_remove_insn (insn, 1);
8890   delete_insn (insn);
8891 }
8892 
8893 /* Clear priorities of all instructions that are forward dependent on INSN.
8894    Store in vector pointed to by ROOTS_PTR insns on which priority () should
8895    be invoked to initialize all cleared priorities.  */
8896 static void
8897 clear_priorities (rtx_insn *insn, rtx_vec_t *roots_ptr)
8898 {
8899   sd_iterator_def sd_it;
8900   dep_t dep;
8901   bool insn_is_root_p = true;
8902 
8903   gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
8904 
8905   FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
8906     {
8907       rtx_insn *pro = DEP_PRO (dep);
8908 
8909       if (INSN_PRIORITY_STATUS (pro) >= 0
8910 	  && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
8911 	{
8912 	  /* If DEP doesn't contribute to priority then INSN itself should
8913 	     be added to priority roots.  */
8914 	  if (contributes_to_priority_p (dep))
8915 	    insn_is_root_p = false;
8916 
8917 	  INSN_PRIORITY_STATUS (pro) = -1;
8918 	  clear_priorities (pro, roots_ptr);
8919 	}
8920     }
8921 
8922   if (insn_is_root_p)
8923     roots_ptr->safe_push (insn);
8924 }
8925 
8926 /* Recompute priorities of instructions whose priorities might have been
8927    changed.  ROOTS is a vector of instructions whose priority computation will
8928    trigger initialization of all cleared priorities.  */
8929 static void
8930 calc_priorities (rtx_vec_t roots)
8931 {
8932   int i;
8933   rtx_insn *insn;
8934 
8935   FOR_EACH_VEC_ELT (roots, i, insn)
8936     priority (insn);
8937 }
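/* clear_priorities and calc_priorities are used as a pair: the former
   invalidates the cached priorities that may be affected by a change around
   INSN and collects the roots, the latter re-invokes priority () on those
   roots, which triggers recomputation of everything that was cleared (see
   their uses in add_to_speculative_block and create_check_block_twin).  */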
8938 
8939 
8940 /* Add dependences between JUMP and other instructions in the recovery
8941    block.  INSN is the first insn in the recovery block.  */
8942 static void
8943 add_jump_dependencies (rtx_insn *insn, rtx_insn *jump)
8944 {
8945   do
8946     {
8947       insn = NEXT_INSN (insn);
8948       if (insn == jump)
8949 	break;
8950 
8951       if (dep_list_size (insn, SD_LIST_FORW) == 0)
8952 	{
8953 	  dep_def _new_dep, *new_dep = &_new_dep;
8954 
8955 	  init_dep (new_dep, insn, jump, REG_DEP_ANTI);
8956 	  sd_add_dep (new_dep, false);
8957 	}
8958     }
8959   while (1);
8960 
8961   gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
8962 }
8963 
8964 /* Extend data structures for logical insn UID.  */
8965 void
8966 sched_extend_luids (void)
8967 {
8968   int new_luids_max_uid = get_max_uid () + 1;
8969 
8970   sched_luids.safe_grow_cleared (new_luids_max_uid);
8971 }
8972 
8973 /* Initialize LUID for INSN.  */
8974 void
8975 sched_init_insn_luid (rtx_insn *insn)
8976 {
8977   int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
8978   int luid;
8979 
8980   if (i >= 0)
8981     {
8982       luid = sched_max_luid;
8983       sched_max_luid += i;
8984     }
8985   else
8986     luid = -1;
8987 
8988   SET_INSN_LUID (insn, luid);
8989 }
8990 
8991 /* Initialize luids for BBS.
8992    The hook common_sched_info->luid_for_non_insn () is used to determine
8993    if notes, labels, etc. need luids.  */
8994 void
8995 sched_init_luids (bb_vec_t bbs)
8996 {
8997   int i;
8998   basic_block bb;
8999 
9000   sched_extend_luids ();
9001   FOR_EACH_VEC_ELT (bbs, i, bb)
9002     {
9003       rtx_insn *insn;
9004 
9005       FOR_BB_INSNS (bb, insn)
9006 	sched_init_insn_luid (insn);
9007     }
9008 }
9009 
9010 /* Free LUIDs.  */
9011 void
9012 sched_finish_luids (void)
9013 {
9014   sched_luids.release ();
9015   sched_max_luid = 1;
9016 }
9017 
9018 /* Return logical uid of INSN.  Helpful while debugging.  */
9019 int
9020 insn_luid (rtx_insn *insn)
9021 {
9022   return INSN_LUID (insn);
9023 }
9024 
9025 /* Extend per insn data in the target.  */
9026 void
9027 sched_extend_target (void)
9028 {
9029   if (targetm.sched.h_i_d_extended)
9030     targetm.sched.h_i_d_extended ();
9031 }
9032 
9033 /* Extend global scheduler structures (those that live across calls to
9034    schedule_block) to include information about just emitted INSN.  */
9035 static void
9036 extend_h_i_d (void)
9037 {
9038   int reserve = (get_max_uid () + 1 - h_i_d.length ());
9039   if (reserve > 0
9040       && ! h_i_d.space (reserve))
9041     {
9042       h_i_d.safe_grow_cleared (3 * get_max_uid () / 2);
9043       sched_extend_target ();
9044     }
9045 }
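/* Note that when more room is needed, h_i_d is grown to
   3 * get_max_uid () / 2 entries with the new part zeroed, leaving headroom
   for insns emitted later on.  */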
9046 
9047 /* Initialize h_i_d entry of the INSN with default values.
9048    Values that are not explicitly initialized here hold zero.  */
9049 static void
9050 init_h_i_d (rtx_insn *insn)
9051 {
9052   if (INSN_LUID (insn) > 0)
9053     {
9054       INSN_COST (insn) = -1;
9055       QUEUE_INDEX (insn) = QUEUE_NOWHERE;
9056       INSN_TICK (insn) = INVALID_TICK;
9057       INSN_EXACT_TICK (insn) = INVALID_TICK;
9058       INTER_TICK (insn) = INVALID_TICK;
9059       TODO_SPEC (insn) = HARD_DEP;
9060       INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status
9061 	= AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
9062       INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status
9063 	= AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
9064     }
9065 }
9066 
9067 /* Initialize haifa_insn_data for BBS.  */
9068 void
9069 haifa_init_h_i_d (bb_vec_t bbs)
9070 {
9071   int i;
9072   basic_block bb;
9073 
9074   extend_h_i_d ();
9075   FOR_EACH_VEC_ELT (bbs, i, bb)
9076     {
9077       rtx_insn *insn;
9078 
9079       FOR_BB_INSNS (bb, insn)
9080 	init_h_i_d (insn);
9081     }
9082 }
9083 
9084 /* Finalize haifa_insn_data.  */
9085 void
9086 haifa_finish_h_i_d (void)
9087 {
9088   int i;
9089   haifa_insn_data_t data;
9090   reg_use_data *use, *next_use;
9091   reg_set_data *set, *next_set;
9092 
9093   FOR_EACH_VEC_ELT (h_i_d, i, data)
9094     {
9095       free (data->max_reg_pressure);
9096       free (data->reg_pressure);
9097       for (use = data->reg_use_list; use != NULL; use = next_use)
9098 	{
9099 	  next_use = use->next_insn_use;
9100 	  free (use);
9101 	}
9102       for (set = data->reg_set_list; set != NULL; set = next_set)
9103 	{
9104 	  next_set = set->next_insn_set;
9105 	  free (set);
9106 	}
9107 
9108     }
9109   h_i_d.release ();
9110 }
9111 
9112 /* Init data for the new insn INSN.  */
9113 static void
9114 haifa_init_insn (rtx_insn *insn)
9115 {
9116   gcc_assert (insn != NULL);
9117 
9118   sched_extend_luids ();
9119   sched_init_insn_luid (insn);
9120   sched_extend_target ();
9121   sched_deps_init (false);
9122   extend_h_i_d ();
9123   init_h_i_d (insn);
9124 
9125   if (adding_bb_to_current_region_p)
9126     {
9127       sd_init_insn (insn);
9128 
9129       /* Extend dependency caches by one element.  */
9130       extend_dependency_caches (1, false);
9131     }
9132   if (sched_pressure != SCHED_PRESSURE_NONE)
9133     init_insn_reg_pressure_info (insn);
9134 }
9135 
9136 /* Init data for the new basic block BB which comes after AFTER.  */
9137 static void
9138 haifa_init_only_bb (basic_block bb, basic_block after)
9139 {
9140   gcc_assert (bb != NULL);
9141 
9142   sched_init_bbs ();
9143 
9144   if (common_sched_info->add_block)
9145     /* This changes only data structures of the front-end.  */
9146     common_sched_info->add_block (bb, after);
9147 }
9148 
9149 /* A generic version of sched_split_block ().  */
9150 basic_block
9151 sched_split_block_1 (basic_block first_bb, rtx after)
9152 {
9153   edge e;
9154 
9155   e = split_block (first_bb, after);
9156   gcc_assert (e->src == first_bb);
9157 
9158   /* sched_split_block emits note if *check == BB_END.  Probably it
9159      is better to rip that note off.  */
9160 
9161   return e->dest;
9162 }
9163 
9164 /* A generic version of sched_create_empty_bb ().  */
9165 basic_block
9166 sched_create_empty_bb_1 (basic_block after)
9167 {
9168   return create_empty_bb (after);
9169 }
9170 
9171 /* Insert PAT as an INSN into the schedule and update the necessary data
9172    structures to account for it. */
9173 rtx_insn *
9174 sched_emit_insn (rtx pat)
9175 {
9176   rtx_insn *insn = emit_insn_before (pat, first_nonscheduled_insn ());
9177   haifa_init_insn (insn);
9178 
9179   if (current_sched_info->add_remove_insn)
9180     current_sched_info->add_remove_insn (insn, 0);
9181 
9182   (*current_sched_info->begin_schedule_ready) (insn);
9183   scheduled_insns.safe_push (insn);
9184 
9185   last_scheduled_insn = insn;
9186   return insn;
9187 }
9188 
9189 /* This function returns a candidate satisfying dispatch constraints from
9190    the ready list.  */
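/* Selection order, as implemented below: take the first ready insn when the
   list has a single entry, that insn is not a normal active insn, or it fits
   the dispatch window; otherwise pick any insn that fits the window; if the
   window is already violated, fall back to the first insn; otherwise prefer
   a compare (IS_CMP); and as a last resort take the first insn anyway.  */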
9191 
9192 static rtx_insn *
9193 ready_remove_first_dispatch (struct ready_list *ready)
9194 {
9195   int i;
9196   rtx_insn *insn = ready_element (ready, 0);
9197 
9198   if (ready->n_ready == 1
9199       || !INSN_P (insn)
9200       || INSN_CODE (insn) < 0
9201       || !active_insn_p (insn)
9202       || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
9203     return ready_remove_first (ready);
9204 
9205   for (i = 1; i < ready->n_ready; i++)
9206     {
9207       insn = ready_element (ready, i);
9208 
9209       if (!INSN_P (insn)
9210 	  || INSN_CODE (insn) < 0
9211 	  || !active_insn_p (insn))
9212 	continue;
9213 
9214       if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
9215 	{
9216 	  /* Return ith element of ready.  */
9217 	  insn = ready_remove (ready, i);
9218 	  return insn;
9219 	}
9220     }
9221 
9222   if (targetm.sched.dispatch (NULL, DISPATCH_VIOLATION))
9223     return ready_remove_first (ready);
9224 
9225   for (i = 1; i < ready->n_ready; i++)
9226     {
9227       insn = ready_element (ready, i);
9228 
9229       if (!INSN_P (insn)
9230 	  || INSN_CODE (insn) < 0
9231 	  || !active_insn_p (insn))
9232 	continue;
9233 
9234       /* Return i-th element of ready.  */
9235       if (targetm.sched.dispatch (insn, IS_CMP))
9236 	return ready_remove (ready, i);
9237     }
9238 
9239   return ready_remove_first (ready);
9240 }
9241 
9242 /* Get the number of ready insns in the ready list.  */
9243 
9244 int
9245 number_in_ready (void)
9246 {
9247   return ready.n_ready;
9248 }
9249 
9250 /* Get the I-th element of the ready list.  */
9251 
9252 rtx_insn *
9253 get_ready_element (int i)
9254 {
9255   return ready_element (&ready, i);
9256 }
9257 
9258 #endif /* INSN_SCHEDULING */
9259