/* Instruction scheduling pass.
   Copyright (C) 1992-2016 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for
   the normal instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes; all of these are dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.  (An illustrative sketch of this loop appears
   after this comment block.)

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block over interblock motion, ties broken by
   4.  prefer useful over speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, ties broken by
   7.  choose the insn which has the most insns dependent on it,
   or finally
   8.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence) can we move operations past
   memory references.  To a first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using insn backward dependences
   INSN_BACK_DEPS.  INSN_BACK_DEPS are translated into forward dependences
   INSN_FORW_DEPS for the purpose of forward list scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live registers constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */
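
/* Purely as an illustration of the list-scheduling loop described
   above -- a hedged sketch, not GCC code; schedule_block in this file
   is the real implementation -- the cycle-by-cycle driver behaves
   roughly like:

       clock = 0;
       while (the ready list or the queue is nonempty)
         {
           queue_to_ready ();       move insns whose delay elapsed (Q->R)
           sort the ready list by rank_for_schedule;
           while (an insn still fits in this cycle)
             {
               insn = best ready insn;
               schedule_insn (insn);   (R->S); newly resolved deps
                                       move insns P->R or P->Q
             }
           clock++;                 advance the DFA state by one cycle
         }

   The names queue_to_ready, rank_for_schedule and schedule_insn refer
   to functions declared later in this file.  */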

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "cfghooks.h"
#include "df.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "insn-attr.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "sched-int.h"
#include "common/common-target.h"
#include "params.h"
#include "dbgcnt.h"
#include "cfgloop.h"
#include "dumpfile.h"
#include "print-rtl.h"

#ifdef INSN_SCHEDULING

/* True if we do register pressure relief through live-range
   shrinkage.  */
static bool live_range_shrinkage_p;

/* Switch on live range shrinkage.  */
void
initialize_live_range_shrinkage (void)
{
  live_range_shrinkage_p = true;
}

/* Switch off live range shrinkage.  */
void
finish_live_range_shrinkage (void)
{
  live_range_shrinkage_p = false;
}

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */

int issue_rate;

/* This can be set to true by a backend if the scheduler should not
   enable a DCE pass.  */
bool sched_no_dce;

/* The current initiation interval used when modulo scheduling.  */
static int modulo_ii;

/* The maximum number of stages we are prepared to handle.  */
static int modulo_max_stages;

/* The number of insns that exist in each iteration of the loop.  We use this
   to detect when we've scheduled all insns from the first iteration.  */
static int modulo_n_insns;

/* The current count of insns in the first iteration of the loop that have
   already been scheduled.  */
static int modulo_insns_scheduled;

/* The maximum uid of insns from the first iteration of the loop.  */
static int modulo_iter0_max_uid;

/* The number of times we should attempt to backtrack when modulo scheduling.
   Decreased each time we have to backtrack.  */
static int modulo_backtracks_left;

/* The stage in which the last insn from the original loop was
   scheduled.  */
static int modulo_last_stage;

/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N=0: no debugging output.
   N=1: default value.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */
int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump.  */
FILE *sched_dump = 0;

/* This is a placeholder for the scheduler parameters common
   to all schedulers.  */
struct common_sched_info_def *common_sched_info;

#define INSN_TICK(INSN)	(HID (INSN)->tick)
#define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
#define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
#define INTER_TICK(INSN) (HID (INSN)->inter_tick)
#define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
#define SHADOW_P(INSN) (HID (INSN)->shadow_p)
#define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
/* Cached cost of the instruction.  Use insn_cost to get cost of the
   insn.  -1 here means that the field is not initialized.  */
#define INSN_COST(INSN)	(HID (INSN)->cost)

/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)

/* Original order of insns in the ready list.
   Used to keep order of normal insns while separating DEBUG_INSNs.  */
#define INSN_RFS_DEBUG_ORIG_ORDER(INSN) (HID (INSN)->rfs_debug_orig_order)

/* The deciding reason for INSN's place in the ready list.  */
#define INSN_LAST_RFS_WIN(INSN) (HID (INSN)->last_rfs_win)

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
rtx_insn *note_list;

static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
spec_info_t spec_info = NULL;

/* True if a recovery block was added during scheduling of the current
   block.  Used to determine whether we need to fix INSN_TICKs.  */
static bool haifa_recovery_bb_recently_added_p;

/* True if a recovery block was added during this scheduling pass.
   Used to determine whether we should have empty memory pools of
   dependencies after finishing the current region.  */
bool haifa_recovery_bb_ever_added_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx_insn **bb_header = 0;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;

/* Basic block just before the EXIT_BLOCK and after recovery, if we have
   created it.  */
basic_block after_recovery;

/* FALSE if we add bb to another region, so we don't need to initialize it.  */
bool adding_bb_to_current_region_p = true;

/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) consists of the insns in the INSN_FORW_DEPS
   of the unscheduled insns, i.e., those that are ready, queued, and
   pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */

/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal time of instruction execution computed by genattr.c
   from the maximal time of functional unit reservations and of getting
   a result.  This is the longest time an insn may be queued.  */

static rtx_insn_list **insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
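
/* A hedged aside, not GCC code: the masking in NEXT_Q and NEXT_Q_AFTER
   only wraps correctly because max_insn_queue_index is a power of two
   minus one, i.e. an all-ones bit mask.  For example, with
   max_insn_queue_index == 7 (queue length 8):

       NEXT_Q (6)           == (6 + 1) & 7 == 7
       NEXT_Q (7)           == (7 + 1) & 7 == 0    wraps around
       NEXT_Q_AFTER (5, 4)  == (5 + 4) & 7 == 1    four cycles ahead

   which is equivalent to, but cheaper than,
   ((X) + C) % (max_insn_queue_index + 1).  */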

#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in
   the queue nor in the ready list.
   QUEUE_READY     - INSN is in the ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */

#define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)

/* The following variable describes all current and future
   reservations of the processor units.  */
state_t curr_state;

/* The following variable is the size of memory representing all
   current and future reservations of the processor units.  */
size_t dfa_state_size;

/* The following array is used to find the best insn from ready when
   the automaton pipeline interface is used.  */
signed char *ready_try = NULL;

/* The ready list.  */
struct ready_list ready = {NULL, 0, 0, 0, 0};

/* The pointer to the ready list (to be removed).  */
static struct ready_list *readyp = &ready;

/* Scheduling clock.  */
static int clock_var;

/* Clock at which the previous instruction was issued.  */
static int last_clock_var;

/* Set to true if, when queuing a shadow insn, we discover that it would be
   scheduled too late.  */
static bool must_backtrack;

/* The following variable is the number of essential insns issued on
   the current cycle.  An insn is an essential one if it changes the
   processor's state.  */
int cycle_issued_insns;

/* This records the actual schedule.  It is built up during the main phase
   of schedule_block, and afterwards used to reorder the insns in the RTL.  */
static vec<rtx_insn *> scheduled_insns;

static int may_trap_exp (const_rtx, int);

/* Nonzero iff the address is comprised of at most 1 register.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)					\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))
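
/* For instance (an illustrative note, not an exhaustive list), the
   following addresses satisfy CONST_BASED_ADDRESS_P:

       (reg:SI 100)                                  a bare register
       (plus:SI (reg:SI 100) (const_int 4))          register + constant
       (lo_sum:SI (reg:SI 100) (symbol_ref:SI "x"))  low part of a symbol

   while (plus:SI (reg:SI 100) (reg:SI 101)), which uses two registers,
   does not.  */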

/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  */


static int haifa_luid_for_non_insn (rtx x);

/* Haifa version of sched_info hooks common to all headers.  */
const struct common_sched_info_def haifa_common_sched_info =
  {
    NULL, /* fix_recovery_cfg */
    NULL, /* add_block */
    NULL, /* estimate_number_of_insns */
    haifa_luid_for_non_insn, /* luid_for_non_insn */
    SCHED_PASS_UNKNOWN /* sched_pass_id */
  };

/* Mapping from instruction UID to its Logical UID.  */
vec<int> sched_luids = vNULL;

/* Next LUID to assign to an instruction.  */
int sched_max_luid = 1;

/* Haifa Instruction Data.  */
vec<haifa_insn_data_def> h_i_d = vNULL;

void (* sched_init_only_bb) (basic_block, basic_block);

/* Split block function.  Different schedulers might use different functions
   to keep their internal data consistent.  */
basic_block (* sched_split_block) (basic_block, rtx);

/* Create empty basic block after the specified block.  */
basic_block (* sched_create_empty_bb) (basic_block);

/* Return the number of cycles until INSN is expected to be ready.
   Return zero if it already is.  */
static int
insn_delay (rtx_insn *insn)
{
  return MAX (INSN_TICK (insn) - clock_var, 0);
}

static int
may_trap_exp (const_rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
	return TRAP_RISKY;
      else
	return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory:  a volatile load.  */
      if (MEM_VOLATILE_P (x))
	return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
	return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
	return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
	return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    {
	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
	      insn_class = WORST_CLASS (insn_class, tmp_class);
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = 0; j < XVECLEN (x, i); j++)
		{
		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
		  insn_class = WORST_CLASS (insn_class, tmp_class);
		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
		    break;
		}
	    }
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
      return insn_class;
    }
}

/* Classifies rtx X of an insn for the purpose of verifying that X can be
   executed speculatively (and consequently the insn can be moved
   speculatively), by examining X, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
   being either PFREE or PRISKY.  */

static int
haifa_classify_rtx (const_rtx x)
{
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (x) == PARALLEL)
    {
      int i, len = XVECLEN (x, 0);

      for (i = len - 1; i >= 0; i--)
	{
	  tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
	  insn_class = WORST_CLASS (insn_class, tmp_class);
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
    }
  else
    {
      code = GET_CODE (x);
      switch (code)
	{
	case CLOBBER:
	  /* Test if it is a 'store'.  */
	  tmp_class = may_trap_exp (XEXP (x, 0), 1);
	  break;
	case SET:
	  /* Test if it is a store.  */
	  tmp_class = may_trap_exp (SET_DEST (x), 1);
	  if (tmp_class == TRAP_RISKY)
	    break;
	  /* Test if it is a load.  */
	  tmp_class =
	    WORST_CLASS (tmp_class,
			 may_trap_exp (SET_SRC (x), 0));
	  break;
	case COND_EXEC:
	  tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
	  if (tmp_class == TRAP_RISKY)
	    break;
	  tmp_class = WORST_CLASS (tmp_class,
				   may_trap_exp (COND_EXEC_TEST (x), 0));
	  break;
	case TRAP_IF:
	  tmp_class = TRAP_RISKY;
	  break;
	default:;
	}
      insn_class = tmp_class;
    }

  return insn_class;
}

int
haifa_classify_insn (const_rtx insn)
{
  return haifa_classify_rtx (PATTERN (insn));
}
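
/* To illustrate the classification above (an informal example, not
   GCC code): an insn whose pattern is

       (set (reg:SI 100)
	    (mem:SI (plus:SI (reg:SI 101) (const_int 8))))

   is a non-volatile load from a one-base-register address, so
   haifa_classify_insn returns PFREE_CANDIDATE, while

       (set (mem:SI (reg:SI 102)) (reg:SI 103))

   is a store; if the MEM may trap, it is classified TRAP_RISKY.  */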

/* After the scheduler initialization function has been called, this function
   can be called to enable modulo scheduling.  II is the initiation interval
   we should use; it affects the delays for delay_pairs that were recorded as
   separated by a given number of stages.

   MAX_STAGES provides us with a limit after which we give up scheduling;
   the caller must have unrolled at least as many copies of the loop body
   and recorded delay_pairs for them.

   INSNS is the number of real (non-debug) insns in one iteration of
   the loop.  MAX_UID can be used to test whether an insn belongs to
   the first iteration of the loop; all of them have a uid lower than
   MAX_UID.  */
void
set_modulo_params (int ii, int max_stages, int insns, int max_uid)
{
  modulo_ii = ii;
  modulo_max_stages = max_stages;
  modulo_n_insns = insns;
  modulo_iter0_max_uid = max_uid;
  modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS);
}
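
/* As a hedged sketch of intended use (hypothetical caller, not code
   from this file): a modulo-scheduling pass that has unrolled a loop
   MAX_STAGES times might do something like

       sched_init ();
       set_modulo_params (ii, max_stages, n_real_insns,
			  first_iteration_max_uid);

   after which delay_pairs recorded with a nonzero STAGES argument are
   interpreted as STAGES * II cycles (see pair_delay below).  */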

/* A structure to record a pair of insns where the first one is a real
   insn that has delay slots, and the second is its delayed shadow.
   I1 is scheduled normally and will emit an assembly instruction,
   while I2 describes the side effect that takes place at the
   transition between cycles CYCLES and (CYCLES + 1) after I1.  */
struct delay_pair
{
  struct delay_pair *next_same_i1;
  rtx_insn *i1, *i2;
  int cycles;
  /* When doing modulo scheduling, a delay_pair can also be used to
     show that I1 and I2 are the same insn in a different stage.  If that
     is the case, STAGES will be nonzero.  */
  int stages;
};

/* Helpers for delay hashing.  */

struct delay_i1_hasher : nofree_ptr_hash <delay_pair>
{
  typedef void *compare_type;
  static inline hashval_t hash (const delay_pair *);
  static inline bool equal (const delay_pair *, const void *);
};

/* Returns a hash value for X, based on hashing just I1.  */

inline hashval_t
delay_i1_hasher::hash (const delay_pair *x)
{
  return htab_hash_pointer (x->i1);
}

/* Return true if I1 of pair X is the same as that of pair Y.  */

inline bool
delay_i1_hasher::equal (const delay_pair *x, const void *y)
{
  return x->i1 == y;
}

struct delay_i2_hasher : free_ptr_hash <delay_pair>
{
  typedef void *compare_type;
  static inline hashval_t hash (const delay_pair *);
  static inline bool equal (const delay_pair *, const void *);
};

/* Returns a hash value for X, based on hashing just I2.  */

inline hashval_t
delay_i2_hasher::hash (const delay_pair *x)
{
  return htab_hash_pointer (x->i2);
}

/* Return true if I2 of pair X is the same as that of pair Y.  */

inline bool
delay_i2_hasher::equal (const delay_pair *x, const void *y)
{
  return x->i2 == y;
}

/* Two hash tables to record delay_pairs, one indexed by I1 and the other
   indexed by I2.  */
static hash_table<delay_i1_hasher> *delay_htab;
static hash_table<delay_i2_hasher> *delay_htab_i2;
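
/* A short illustrative note (it assumes a valid rtx_insn *insn and a
   populated table; this mirrors real_insn_for_shadow below): because
   both hashers hash a pointer field, lookups pass the raw pointer as
   the key together with its pointer hash, e.g.

       struct delay_pair *p
	 = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));

   and delay_i2_hasher::equal then compares that pointer against the
   I2 field of each candidate pair.  */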

/* Called through htab_traverse.  Walk the hashtable using I2 as
   index, and delete all elements involving a UID higher than
   that pointed to by *DATA.  */
int
haifa_htab_i2_traverse (delay_pair **slot, int *data)
{
  int maxuid = *data;
  struct delay_pair *p = *slot;
  if (INSN_UID (p->i2) >= maxuid || INSN_UID (p->i1) >= maxuid)
    {
      delay_htab_i2->clear_slot (slot);
    }
  return 1;
}

/* Called through htab_traverse.  Walk the hashtable using I1 as
   index, and delete all elements involving a UID higher than
   that pointed to by *DATA.  */
int
haifa_htab_i1_traverse (delay_pair **pslot, int *data)
{
  int maxuid = *data;
  struct delay_pair *p, *first, **pprev;

  if (INSN_UID ((*pslot)->i1) >= maxuid)
    {
      delay_htab->clear_slot (pslot);
      return 1;
    }
  pprev = &first;
  for (p = *pslot; p; p = p->next_same_i1)
    {
      if (INSN_UID (p->i2) < maxuid)
	{
	  *pprev = p;
	  pprev = &p->next_same_i1;
	}
    }
  *pprev = NULL;
  if (first == NULL)
    delay_htab->clear_slot (pslot);
  else
    *pslot = first;
  return 1;
}

/* Discard all delay pairs which involve an insn with a UID higher
   than MAX_UID.  */
void
discard_delay_pairs_above (int max_uid)
{
  delay_htab->traverse <int *, haifa_htab_i1_traverse> (&max_uid);
  delay_htab_i2->traverse <int *, haifa_htab_i2_traverse> (&max_uid);
}

/* This function can be called by a port just before it starts the final
   scheduling pass.  It records the fact that an instruction with delay
   slots has been split into two insns, I1 and I2.  The first one will be
   scheduled normally and initiates the operation.  The second one is a
   shadow which must follow a specific number of cycles after I1; its only
   purpose is to show the side effect that occurs at that cycle in the RTL.
   If a JUMP_INSN or a CALL_INSN has been split, I1 should be a normal INSN,
   while I2 retains the original insn type.

   There are two ways in which the number of cycles can be specified,
   involving the CYCLES and STAGES arguments to this function.  If STAGES
   is zero, we just use the value of CYCLES.  Otherwise, STAGES is a factor
   which is multiplied by MODULO_II to give the number of cycles.  This is
   only useful if the caller also calls set_modulo_params to enable modulo
   scheduling.  */

void
record_delay_slot_pair (rtx_insn *i1, rtx_insn *i2, int cycles, int stages)
{
  struct delay_pair *p = XNEW (struct delay_pair);
  struct delay_pair **slot;

  p->i1 = i1;
  p->i2 = i2;
  p->cycles = cycles;
  p->stages = stages;

  if (!delay_htab)
    {
      delay_htab = new hash_table<delay_i1_hasher> (10);
      delay_htab_i2 = new hash_table<delay_i2_hasher> (10);
    }
  slot = delay_htab->find_slot_with_hash (i1, htab_hash_pointer (i1), INSERT);
  p->next_same_i1 = *slot;
  *slot = p;
  slot = delay_htab_i2->find_slot (p, INSERT);
  *slot = p;
}
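
/* A hedged usage sketch (hypothetical port code, not from GCC): a
   target that has split a delayed branch into an issuing insn I1 and
   a shadow I2 describing the branch effect two cycles later might do

       record_delay_slot_pair (i1, i2, 2, 0);

   with STAGES == 0 since no modulo scheduling is involved; the
   scheduler will then place I2 a fixed two cycles after I1.  */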

/* Examine the delay pair hashtable to see if INSN is a shadow for another,
   and return the other insn if so.  Return NULL otherwise.  */
rtx_insn *
real_insn_for_shadow (rtx_insn *insn)
{
  struct delay_pair *pair;

  if (!delay_htab)
    return NULL;

  pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
  if (!pair || pair->stages > 0)
    return NULL;
  return pair->i1;
}

/* For a pair P of insns, return the fixed distance in cycles from the first
   insn after which the second must be scheduled.  */
static int
pair_delay (struct delay_pair *p)
{
  if (p->stages == 0)
    return p->cycles;
  else
    return p->stages * modulo_ii;
}

/* Given an insn INSN, add a dependence on its delayed shadow if it
   has one.  Also try to find situations where shadows depend on each other
   and add dependencies to the real insns to limit the amount of backtracking
   needed.  */
void
add_delay_dependencies (rtx_insn *insn)
{
  struct delay_pair *pair;
  sd_iterator_def sd_it;
  dep_t dep;

  if (!delay_htab)
    return;

  pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
  if (!pair)
    return;
  add_dependence (insn, pair->i1, REG_DEP_ANTI);
  if (pair->stages)
    return;

  FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      struct delay_pair *other_pair
	= delay_htab_i2->find_with_hash (pro, htab_hash_pointer (pro));
      if (!other_pair || other_pair->stages)
	continue;
      if (pair_delay (other_pair) >= pair_delay (pair))
	{
	  if (sched_verbose >= 4)
	    {
	      fprintf (sched_dump, ";;\tadding dependence %d <- %d\n",
		       INSN_UID (other_pair->i1),
		       INSN_UID (pair->i1));
	      fprintf (sched_dump, ";;\tpair1 %d <- %d, cost %d\n",
		       INSN_UID (pair->i1),
		       INSN_UID (pair->i2),
		       pair_delay (pair));
	      fprintf (sched_dump, ";;\tpair2 %d <- %d, cost %d\n",
		       INSN_UID (other_pair->i1),
		       INSN_UID (other_pair->i2),
		       pair_delay (other_pair));
	    }
	  add_dependence (pair->i1, other_pair->i1, REG_DEP_ANTI);
	}
    }
}

/* Forward declarations.  */

static int priority (rtx_insn *);
static int autopref_rank_for_schedule (const rtx_insn *, const rtx_insn *);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx_insn **, int);
static void queue_insn (rtx_insn *, int, const char *);
static int schedule_insn (rtx_insn *);
static void adjust_priority (rtx_insn *);
static void advance_one_cycle (void);
static void extend_h_i_d (void);


/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between two types of notes:

   (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (2) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */

static void ready_add (struct ready_list *, rtx_insn *, bool);
static rtx_insn *ready_remove_first (struct ready_list *);
static rtx_insn *ready_remove_first_dispatch (struct ready_list *ready);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx_insn *ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx_insn *);

static void fix_inter_tick (rtx_insn *, rtx_insn *);
static int fix_tick_ready (rtx_insn *);
static void change_queue_index (rtx_insn *, int);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void init_h_i_d (rtx_insn *);
static int haifa_speculate_insn (rtx_insn *, ds_t, rtx *);
static void generate_recovery_code (rtx_insn *);
static void process_insn_forw_deps_be_in_spec (rtx_insn *, rtx_insn *, ds_t);
static void begin_speculative_block (rtx_insn *);
static void add_to_speculative_block (rtx_insn *);
static void init_before_recovery (basic_block *);
static void create_check_block_twin (rtx_insn *, bool);
static void fix_recovery_deps (basic_block);
static bool haifa_change_pattern (rtx_insn *, rtx);
static void dump_new_block_header (int, basic_block, rtx_insn *, rtx_insn *);
static void restore_bb_notes (basic_block);
static void fix_jump_move (rtx_insn *);
static void move_block_after_check (rtx_insn *);
static void move_succs (vec<edge, va_gc> **, basic_block);
static void sched_remove_insn (rtx_insn *);
static void clear_priorities (rtx_insn *, rtx_vec_t *);
static void calc_priorities (rtx_vec_t);
static void add_jump_dependencies (rtx_insn *, rtx_insn *);

#endif /* INSN_SCHEDULING */

/* Point to state used for the current scheduling pass.  */
struct haifa_sched_info *current_sched_info;

#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else

/* Do register pressure sensitive insn scheduling if the flag is set
   up.  */
enum sched_pressure_algorithm sched_pressure;

/* Map regno -> its pressure class.  The map is defined only when
   SCHED_PRESSURE != SCHED_PRESSURE_NONE.  */
enum reg_class *sched_regno_pressure_class;

/* The current register pressure.  Only the elements corresponding to
   pressure classes are defined.  */
static int curr_reg_pressure[N_REG_CLASSES];

/* Saved value of the previous array.  */
static int saved_reg_pressure[N_REG_CLASSES];

/* Registers live at the given scheduling point.  */
static bitmap curr_reg_live;

/* Saved value of the previous bitmap.  */
static bitmap saved_reg_live;

/* Registers mentioned in the current region.  */
static bitmap region_ref_regs;

/* Effective number of available registers of a given class (see comment
   in sched_pressure_start_bb).  */
static int sched_class_regs_num[N_REG_CLASSES];
/* Number of call_used_regs.  This is a helper for calculating
   sched_class_regs_num.  */
static int call_used_regs_num[N_REG_CLASSES];

/* Initiate register pressure relative info for scheduling the current
   region.  Currently it only clears the registers mentioned in the
   current region.  */
void
sched_init_region_reg_pressure_info (void)
{
  bitmap_clear (region_ref_regs);
}

/* PRESSURE[CL] describes the pressure on register class CL.  Update it
   for the birth (if BIRTH_P) or death (if !BIRTH_P) of register REGNO.
   LIVE tracks the set of live registers; if it is null, assume that
   every birth or death is genuine.  */
static inline void
mark_regno_birth_or_death (bitmap live, int *pressure, int regno, bool birth_p)
{
  enum reg_class pressure_class;

  pressure_class = sched_regno_pressure_class[regno];
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (pressure_class != NO_REGS)
	{
	  if (birth_p)
	    {
	      if (!live || bitmap_set_bit (live, regno))
		pressure[pressure_class]
		  += (ira_reg_class_max_nregs
		      [pressure_class][PSEUDO_REGNO_MODE (regno)]);
	    }
	  else
	    {
	      if (!live || bitmap_clear_bit (live, regno))
		pressure[pressure_class]
		  -= (ira_reg_class_max_nregs
		      [pressure_class][PSEUDO_REGNO_MODE (regno)]);
	    }
	}
    }
  else if (pressure_class != NO_REGS
	   && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
    {
      if (birth_p)
	{
	  if (!live || bitmap_set_bit (live, regno))
	    pressure[pressure_class]++;
	}
      else
	{
	  if (!live || bitmap_clear_bit (live, regno))
	    pressure[pressure_class]--;
	}
    }
}
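
/* A worked example of the update above (illustrative; the exact
   numbers depend on the target): on a 32-bit target where
   ira_reg_class_max_nregs[GENERAL_REGS][DImode] == 2, the birth of a
   DImode pseudo in pressure class GENERAL_REGS adds 2 to
   pressure[GENERAL_REGS], while the birth of a single hard register
   adds exactly 1.  The bitmap_set_bit / bitmap_clear_bit return values
   make repeated births or deaths of the same register idempotent.  */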

/* Initiate current register pressure related info from living
   registers given by LIVE.  */
static void
initiate_reg_pressure_info (bitmap live)
{
  int i;
  unsigned int j;
  bitmap_iterator bi;

  for (i = 0; i < ira_pressure_classes_num; i++)
    curr_reg_pressure[ira_pressure_classes[i]] = 0;
  bitmap_clear (curr_reg_live);
  EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
    if (sched_pressure == SCHED_PRESSURE_MODEL
	|| current_nr_blocks == 1
	|| bitmap_bit_p (region_ref_regs, j))
      mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure, j, true);
}

/* Mark registers in X as mentioned in the current region.  */
static void
setup_ref_regs (rtx x)
{
  int i, j;
  const RTX_CODE code = GET_CODE (x);
  const char *fmt;

  if (REG_P (x))
    {
      bitmap_set_range (region_ref_regs, REGNO (x), REG_NREGS (x));
      return;
    }
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      setup_ref_regs (XEXP (x, i));
    else if (fmt[i] == 'E')
      {
	for (j = 0; j < XVECLEN (x, i); j++)
	  setup_ref_regs (XVECEXP (x, i, j));
      }
}

/* Initiate current register pressure related info at the start of
   basic block BB.  */
static void
initiate_bb_reg_pressure_info (basic_block bb)
{
  unsigned int i ATTRIBUTE_UNUSED;
  rtx_insn *insn;

  if (current_nr_blocks > 1)
    FOR_BB_INSNS (bb, insn)
      if (NONDEBUG_INSN_P (insn))
	setup_ref_regs (PATTERN (insn));
  initiate_reg_pressure_info (df_get_live_in (bb));
  if (bb_has_eh_pred (bb))
    for (i = 0; ; ++i)
      {
	unsigned int regno = EH_RETURN_DATA_REGNO (i);

	if (regno == INVALID_REGNUM)
	  break;
	if (! bitmap_bit_p (df_get_live_in (bb), regno))
	  mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
				     regno, true);
      }
}

/* Save current register pressure related info.  */
static void
save_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_pressure_classes_num; i++)
    saved_reg_pressure[ira_pressure_classes[i]]
      = curr_reg_pressure[ira_pressure_classes[i]];
  bitmap_copy (saved_reg_live, curr_reg_live);
}

/* Restore saved register pressure related info.  */
static void
restore_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_pressure_classes_num; i++)
    curr_reg_pressure[ira_pressure_classes[i]]
      = saved_reg_pressure[ira_pressure_classes[i]];
  bitmap_copy (curr_reg_live, saved_reg_live);
}

/* Return TRUE if the register is dying after its USE.  */
static bool
dying_use_p (struct reg_use_data *use)
{
  struct reg_use_data *next;

  for (next = use->next_regno_use; next != use; next = next->next_regno_use)
    if (NONDEBUG_INSN_P (next->insn)
	&& QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
      return false;
  return true;
}

/* Print info about the current register pressure and its excess for
   each pressure class.  */
static void
print_curr_reg_pressure (void)
{
  int i;
  enum reg_class cl;

  fprintf (sched_dump, ";;\t");
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      gcc_assert (curr_reg_pressure[cl] >= 0);
      fprintf (sched_dump, "  %s:%d(%d)", reg_class_names[cl],
	       curr_reg_pressure[cl],
	       curr_reg_pressure[cl] - sched_class_regs_num[cl]);
    }
  fprintf (sched_dump, "\n");
}

/* Determine if INSN has a condition that is clobbered if a register
   in SET_REGS is modified.  */
static bool
cond_clobbered_p (rtx_insn *insn, HARD_REG_SET set_regs)
{
  rtx pat = PATTERN (insn);
  gcc_assert (GET_CODE (pat) == COND_EXEC);
  if (TEST_HARD_REG_BIT (set_regs, REGNO (XEXP (COND_EXEC_TEST (pat), 0))))
    {
      sd_iterator_def sd_it;
      dep_t dep;
      haifa_change_pattern (insn, ORIG_PAT (insn));
      FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
	DEP_STATUS (dep) &= ~DEP_CANCELLED;
      TODO_SPEC (insn) = HARD_DEP;
      if (sched_verbose >= 2)
	fprintf (sched_dump,
		 ";;\t\tdequeue insn %s because of clobbered condition\n",
		 (*current_sched_info->print_insn) (insn, 0));
      return true;
    }

  return false;
}

/* This function should be called after modifying the pattern of INSN,
   to update scheduler data structures as needed.  */
static void
update_insn_after_change (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  dfa_clear_single_insn_cache (insn);

  sd_it = sd_iterator_start (insn,
			     SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK);
  while (sd_iterator_cond (&sd_it, &dep))
    {
      DEP_COST (dep) = UNKNOWN_DEP_COST;
      sd_iterator_next (&sd_it);
    }

  /* Invalidate INSN_COST, so it'll be recalculated.  */
  INSN_COST (insn) = -1;
  /* Invalidate INSN_TICK, so it'll be recalculated.  */
  INSN_TICK (insn) = INVALID_TICK;

  /* Invalidate autoprefetch data entry.  */
  INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status
    = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
  INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status
    = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
}


/* Two VECs, one to hold dependencies for which pattern replacements
   need to be applied or restored at the start of the next cycle, and
   another to hold an integer that is either one, to apply the
   corresponding replacement, or zero to restore it.  */
static vec<dep_t> next_cycle_replace_deps;
static vec<int> next_cycle_apply;

static void apply_replacement (dep_t, bool);
static void restore_pattern (dep_t, bool);

/* Look at the remaining dependencies for insn NEXT, and compute and return
   the TODO_SPEC value we should use for it.  This is called after one of
   NEXT's dependencies has been resolved.
   We also perform pattern replacements for predication, and for broken
   replacement dependencies.  The latter is only done if FOR_BACKTRACK is
   false.  */

static ds_t
recompute_todo_spec (rtx_insn *next, bool for_backtrack)
{
  ds_t new_ds;
  sd_iterator_def sd_it;
  dep_t dep, modify_dep = NULL;
  int n_spec = 0;
  int n_control = 0;
  int n_replace = 0;
  bool first_p = true;

  if (sd_lists_empty_p (next, SD_LIST_BACK))
    /* NEXT has all its dependencies resolved.  */
    return 0;

  if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
    return HARD_DEP;

  /* If NEXT is intended to sit adjacent to this instruction, we don't
     want to try to break any dependencies.  Treat it as a HARD_DEP.  */
  if (SCHED_GROUP_P (next))
    return HARD_DEP;

  /* Now we've got NEXT with speculative deps only.
     1. Look at the deps to see what we have to do.
     2. Check if we can do 'todo'.  */
  new_ds = 0;

  FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      ds_t ds = DEP_STATUS (dep) & SPECULATIVE;

      if (DEBUG_INSN_P (pro) && !DEBUG_INSN_P (next))
	continue;

      if (ds)
	{
	  n_spec++;
	  if (first_p)
	    {
	      first_p = false;

	      new_ds = ds;
	    }
	  else
	    new_ds = ds_merge (new_ds, ds);
	}
      else if (DEP_TYPE (dep) == REG_DEP_CONTROL)
	{
	  if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
	    {
	      n_control++;
	      modify_dep = dep;
	    }
	  DEP_STATUS (dep) &= ~DEP_CANCELLED;
	}
      else if (DEP_REPLACE (dep) != NULL)
	{
	  if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
	    {
	      n_replace++;
	      modify_dep = dep;
	    }
	  DEP_STATUS (dep) &= ~DEP_CANCELLED;
	}
    }

  if (n_replace > 0 && n_control == 0 && n_spec == 0)
    {
      if (!dbg_cnt (sched_breakdep))
	return HARD_DEP;
      FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
	{
	  struct dep_replacement *desc = DEP_REPLACE (dep);
	  if (desc != NULL)
	    {
	      if (desc->insn == next && !for_backtrack)
		{
		  gcc_assert (n_replace == 1);
		  apply_replacement (dep, true);
		}
	      DEP_STATUS (dep) |= DEP_CANCELLED;
	    }
	}
      return 0;
    }

  else if (n_control == 1 && n_replace == 0 && n_spec == 0)
    {
      rtx_insn *pro, *other;
      rtx new_pat;
      rtx cond = NULL_RTX;
      bool success;
      rtx_insn *prev = NULL;
      int i;
      unsigned regno;

      if ((current_sched_info->flags & DO_PREDICATION) == 0
	  || (ORIG_PAT (next) != NULL_RTX
	      && PREDICATED_PAT (next) == NULL_RTX))
	return HARD_DEP;

      pro = DEP_PRO (modify_dep);
      other = real_insn_for_shadow (pro);
      if (other != NULL_RTX)
	pro = other;

      cond = sched_get_reverse_condition_uncached (pro);
      regno = REGNO (XEXP (cond, 0));

      /* Find the last scheduled insn that modifies the condition register.
	 We can stop looking once we find the insn we depend on through the
	 REG_DEP_CONTROL; if the condition register isn't modified after it,
	 we know that it still has the right value.  */
      if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
	FOR_EACH_VEC_ELT_REVERSE (scheduled_insns, i, prev)
	  {
	    HARD_REG_SET t;

	    find_all_hard_reg_sets (prev, &t, true);
	    if (TEST_HARD_REG_BIT (t, regno))
	      return HARD_DEP;
	    if (prev == pro)
	      break;
	  }
      if (ORIG_PAT (next) == NULL_RTX)
	{
	  ORIG_PAT (next) = PATTERN (next);

	  new_pat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (next));
	  success = haifa_change_pattern (next, new_pat);
	  if (!success)
	    return HARD_DEP;
	  PREDICATED_PAT (next) = new_pat;
	}
      else if (PATTERN (next) != PREDICATED_PAT (next))
	{
	  bool success = haifa_change_pattern (next,
					       PREDICATED_PAT (next));
	  gcc_assert (success);
	}
      DEP_STATUS (modify_dep) |= DEP_CANCELLED;
      return DEP_CONTROL;
    }

  if (PREDICATED_PAT (next) != NULL_RTX)
    {
      int tick = INSN_TICK (next);
      bool success = haifa_change_pattern (next,
					   ORIG_PAT (next));
      INSN_TICK (next) = tick;
      gcc_assert (success);
    }

  /* We can't handle the case where there are both speculative and control
     dependencies, so we return HARD_DEP in such a case.  Also fail if
     we have speculative dependencies with not enough points, or more than
     one control dependency.  */
  if ((n_spec > 0 && (n_control > 0 || n_replace > 0))
      || (n_spec > 0
	  /* Too few points?  */
	  && ds_weak (new_ds) < spec_info->data_weakness_cutoff)
      || n_control > 0
      || n_replace > 0)
    return HARD_DEP;

  return new_ds;
}
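
/* To illustrate the predication case above (informal example; the
   exact condition rtx depends on the target): if NEXT's only
   unresolved dependence is a REG_DEP_CONTROL on a conditional branch
   testing (ne (reg:CC 17) (const_int 0)), then a pattern such as

       (set (reg:SI 100) (reg:SI 101))

   is saved in ORIG_PAT and replaced by the reverse-predicated

       (cond_exec (eq (reg:CC 17) (const_int 0))
		  (set (reg:SI 100) (reg:SI 101)))

   allowing NEXT to be scheduled before the branch; the dependence is
   marked DEP_CANCELLED and DEP_CONTROL is returned.  */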

/* Pointer to the last instruction scheduled.  */
static rtx_insn *last_scheduled_insn;

/* Pointer to the last nondebug instruction scheduled within the
   block, or the prev_head of the scheduling block.  Used by
   rank_for_schedule, so that insns independent of the last scheduled
   insn will be preferred over dependent instructions.  */
static rtx_insn *last_nondebug_scheduled_insn;

/* Pointer that iterates through the list of unscheduled insns if we
   have a dbg_cnt enabled.  It always points at an insn prior to the
   first unscheduled one.  */
static rtx_insn *nonscheduled_insns_begin;

/* Compute cost of executing INSN.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
insn_cost (rtx_insn *insn)
{
  int cost;

  if (sched_fusion)
    return 0;

  if (sel_sched_p ())
    {
      if (recog_memoized (insn) < 0)
	return 0;

      cost = insn_default_latency (insn);
      if (cost < 0)
	cost = 0;

      return cost;
    }

  cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
	 understand.  We can't pass these directly to
	 result_ready_cost or insn_default_latency because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
	{
	  INSN_COST (insn) = 0;
	  return 0;
	}
      else
	{
	  cost = insn_default_latency (insn);
	  if (cost < 0)
	    cost = 0;

	  INSN_COST (insn) = cost;
	}
    }

  return cost;
}

/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.
   ??? We also use this function to call recog_memoized on all insns.  */
int
dep_cost_1 (dep_t link, dw_t dw)
{
  rtx_insn *insn = DEP_PRO (link);
  rtx_insn *used = DEP_CON (link);
  int cost;

  if (DEP_COST (link) != UNKNOWN_DEP_COST)
    return DEP_COST (link);

  if (delay_htab)
    {
      struct delay_pair *delay_entry;
      delay_entry
	= delay_htab_i2->find_with_hash (used, htab_hash_pointer (used));
      if (delay_entry)
	{
	  if (delay_entry->i1 == insn)
	    {
	      DEP_COST (link) = pair_delay (delay_entry);
	      return DEP_COST (link);
	    }
	}
    }

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  We don't care about the
     dependence cost when only decreasing register pressure.  */
  if (recog_memoized (used) < 0)
    {
      cost = 0;
      recog_memoized (insn);
    }
  else
    {
      enum reg_note dep_type = DEP_TYPE (link);

      cost = insn_cost (insn);

      if (INSN_CODE (insn) >= 0)
	{
	  if (dep_type == REG_DEP_ANTI)
	    cost = 0;
	  else if (dep_type == REG_DEP_OUTPUT)
	    {
	      cost = (insn_default_latency (insn)
		      - insn_default_latency (used));
	      if (cost <= 0)
		cost = 1;
	    }
	  else if (bypass_p (insn))
	    cost = insn_latency (insn, used);
	}


      if (targetm.sched.adjust_cost_2)
	cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
					    dw);
      else if (targetm.sched.adjust_cost != NULL)
	{
	  /* This variable is used for backward compatibility with the
	     targets.  */
	  rtx_insn_list *dep_cost_rtx_link =
	    alloc_INSN_LIST (NULL_RTX, NULL);

	  /* Make it self-cycled, so that if someone tries to walk over this
	     incomplete list they will be caught in an endless loop.  */
	  XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;

	  /* Targets use only REG_NOTE_KIND of the link.  */
	  PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));

	  cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
					    insn, cost);

	  free_INSN_LIST_node (dep_cost_rtx_link);
	}

      if (cost < 0)
	cost = 0;
    }

  DEP_COST (link) = cost;
  return cost;
}

/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
dep_cost (dep_t link)
{
  return dep_cost_1 (link, 0);
}
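
/* A brief worked example of the cost rules above (illustrative
   latencies, not from any particular target): if INSN has a default
   latency of 3 and USED of 1, then a true dependence costs 3 cycles,
   an anti dependence (REG_DEP_ANTI) costs 0, and an output dependence
   (REG_DEP_OUTPUT) costs MAX (3 - 1, 1) = 2, since the second write
   must land at least one cycle after the first.  */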
1525 
1526 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
1527    INSN_PRIORITY explicitly.  */
1528 void
increase_insn_priority(rtx_insn * insn,int amount)1529 increase_insn_priority (rtx_insn *insn, int amount)
1530 {
1531   if (!sel_sched_p ())
1532     {
1533       /* We're dealing with haifa-sched.c INSN_PRIORITY.  */
1534       if (INSN_PRIORITY_KNOWN (insn))
1535 	  INSN_PRIORITY (insn) += amount;
1536     }
1537   else
1538     {
1539       /* In sel-sched.c INSN_PRIORITY is not kept up to date.
1540 	 Use EXPR_PRIORITY instead. */
1541       sel_add_to_insn_priority (insn, amount);
1542     }
1543 }
1544 
1545 /* Return 'true' if DEP should be included in priority calculations.  */
1546 static bool
contributes_to_priority_p(dep_t dep)1547 contributes_to_priority_p (dep_t dep)
1548 {
1549   if (DEBUG_INSN_P (DEP_CON (dep))
1550       || DEBUG_INSN_P (DEP_PRO (dep)))
1551     return false;
1552 
1553   /* Critical path is meaningful in block boundaries only.  */
1554   if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
1555 						    DEP_PRO (dep)))
1556     return false;
1557 
1558   if (DEP_REPLACE (dep) != NULL)
1559     return false;
1560 
1561   /* If the flag COUNT_SPEC_IN_CRITICAL_PATH is set,
1562      then speculative instructions will be less likely
1563      to be scheduled.  That is because the priority of
1564      their producers will increase, and, thus, the
1565      producers will be more likely to be scheduled,
1566      thus resolving the dependence.  */
1567   if (sched_deps_info->generate_spec_deps
1568       && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
1569       && (DEP_STATUS (dep) & SPECULATIVE))
1570     return false;
1571 
1572   return true;
1573 }
1574 
1575 /* Compute the number of nondebug deps in list LIST for INSN.  */
1576 
1577 static int
1578 dep_list_size (rtx_insn *insn, sd_list_types_def list)
1579 {
1580   sd_iterator_def sd_it;
1581   dep_t dep;
1582   int dbgcount = 0, nodbgcount = 0;
1583 
1584   if (!MAY_HAVE_DEBUG_INSNS)
1585     return sd_lists_size (insn, list);
1586 
1587   FOR_EACH_DEP (insn, list, sd_it, dep)
1588     {
1589       if (DEBUG_INSN_P (DEP_CON (dep)))
1590 	dbgcount++;
1591       else if (!DEBUG_INSN_P (DEP_PRO (dep)))
1592 	nodbgcount++;
1593     }
1594 
1595   gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, list));
1596 
1597   return nodbgcount;
1598 }
1599 
1600 bool sched_fusion;
1601 
1602 /* Compute the priority number for INSN.  */
1603 static int
1604 priority (rtx_insn *insn)
1605 {
1606   if (! INSN_P (insn))
1607     return 0;
1608 
1609   /* We should not be interested in the priority of an already scheduled insn.  */
1610   gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
1611 
1612   if (!INSN_PRIORITY_KNOWN (insn))
1613     {
1614       int this_priority = -1;
1615 
1616       if (sched_fusion)
1617 	{
1618 	  int this_fusion_priority;
1619 
1620 	  targetm.sched.fusion_priority (insn, FUSION_MAX_PRIORITY,
1621 					 &this_fusion_priority, &this_priority);
1622 	  INSN_FUSION_PRIORITY (insn) = this_fusion_priority;
1623 	}
1624       else if (dep_list_size (insn, SD_LIST_FORW) == 0)
1625 	/* ??? We should set INSN_PRIORITY to insn_cost when an insn has
1626 	   some forward deps but all of them are ignored by the
1627 	   contributes_to_priority hook.  At the moment we set the priority
1628 	   of such an insn to 0.  */
1629 	this_priority = insn_cost (insn);
1630       else
1631 	{
1632 	  rtx_insn *prev_first, *twin;
1633 	  basic_block rec;
1634 
1635 	  /* For recovery check instructions we calculate priority slightly
1636 	     differently than for normal instructions.  Instead of walking
1637 	     through the INSN_FORW_DEPS (check) list, we walk through the
1638 	     INSN_FORW_DEPS list of each instruction in the corresponding
1639 	     recovery block.  */
1640 
1641           /* Selective scheduling does not define RECOVERY_BLOCK macro.  */
1642 	  rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
1643 	  if (!rec || rec == EXIT_BLOCK_PTR_FOR_FN (cfun))
1644 	    {
1645 	      prev_first = PREV_INSN (insn);
1646 	      twin = insn;
1647 	    }
1648 	  else
1649 	    {
1650 	      prev_first = NEXT_INSN (BB_HEAD (rec));
1651 	      twin = PREV_INSN (BB_END (rec));
1652 	    }
1653 
1654 	  do
1655 	    {
1656 	      sd_iterator_def sd_it;
1657 	      dep_t dep;
1658 
1659 	      FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
1660 		{
1661 		  rtx_insn *next;
1662 		  int next_priority;
1663 
1664 		  next = DEP_CON (dep);
1665 
1666 		  if (BLOCK_FOR_INSN (next) != rec)
1667 		    {
1668 		      int cost;
1669 
1670 		      if (!contributes_to_priority_p (dep))
1671 			continue;
1672 
1673 		      if (twin == insn)
1674 			cost = dep_cost (dep);
1675 		      else
1676 			{
1677 			  struct _dep _dep1, *dep1 = &_dep1;
1678 
1679 			  init_dep (dep1, insn, next, REG_DEP_ANTI);
1680 
1681 			  cost = dep_cost (dep1);
1682 			}
1683 
1684 		      next_priority = cost + priority (next);
1685 
1686 		      if (next_priority > this_priority)
1687 			this_priority = next_priority;
1688 		    }
1689 		}
1690 
1691 	      twin = PREV_INSN (twin);
1692 	    }
1693 	  while (twin != prev_first);
1694 	}
1695 
1696       if (this_priority < 0)
1697 	{
1698 	  gcc_assert (this_priority == -1);
1699 
1700 	  this_priority = insn_cost (insn);
1701 	}
1702 
1703       INSN_PRIORITY (insn) = this_priority;
1704       INSN_PRIORITY_STATUS (insn) = 1;
1705     }
1706 
1707   return INSN_PRIORITY (insn);
1708 }
1709 
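/* Illustrative sketch (hypothetical names and a toy DAG encoding): the
   recursion used by priority () above.  A leaf's priority is its own
   cost; otherwise it is the maximum, over contributing forward deps,
   of the dep cost plus the consumer's priority, i.e. the length of the
   longest cost-weighted path to the end of the block.  */

struct sketch_fdep { int consumer; int cost; };

static int
sketch_priority (int insn, const int *insn_cost,
		 const struct sketch_fdep (*deps)[4], const int *ndeps,
		 int *memo)
{
  if (memo[insn] >= 0)
    return memo[insn];

  int best = -1;
  for (int i = 0; i < ndeps[insn]; i++)
    {
      int p = deps[insn][i].cost
	      + sketch_priority (deps[insn][i].consumer, insn_cost,
				 deps, ndeps, memo);
      if (p > best)
	best = p;
    }

  /* No contributing forward deps: fall back to the insn's own cost,
     just as the code above does when this_priority stays at -1.  */
  if (best < 0)
    best = insn_cost[insn];

  return memo[insn] = best;
}
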
1710 /* Macros and functions for keeping the priority queue sorted, and
1711    dealing with queuing and dequeuing of instructions.  */
1712 
1713 /* For each pressure class CL, set DEATH[CL] to the number of registers
1714    in that class that die in INSN.  */
1715 
1716 static void
1717 calculate_reg_deaths (rtx_insn *insn, int *death)
1718 {
1719   int i;
1720   struct reg_use_data *use;
1721 
1722   for (i = 0; i < ira_pressure_classes_num; i++)
1723     death[ira_pressure_classes[i]] = 0;
1724   for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1725     if (dying_use_p (use))
1726       mark_regno_birth_or_death (0, death, use->regno, true);
1727 }
1728 
1729 /* Setup info about the current register pressure impact of scheduling
1730    INSN at the current scheduling point.  */
1731 static void
1732 setup_insn_reg_pressure_info (rtx_insn *insn)
1733 {
1734   int i, change, before, after, hard_regno;
1735   int excess_cost_change;
1736   machine_mode mode;
1737   enum reg_class cl;
1738   struct reg_pressure_data *pressure_info;
1739   int *max_reg_pressure;
1740   static int death[N_REG_CLASSES];
1741 
1742   gcc_checking_assert (!DEBUG_INSN_P (insn));
1743 
1744   excess_cost_change = 0;
1745   calculate_reg_deaths (insn, death);
1746   pressure_info = INSN_REG_PRESSURE (insn);
1747   max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
1748   gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
1749   for (i = 0; i < ira_pressure_classes_num; i++)
1750     {
1751       cl = ira_pressure_classes[i];
1752       gcc_assert (curr_reg_pressure[cl] >= 0);
1753       change = (int) pressure_info[i].set_increase - death[cl];
1754       before = MAX (0, max_reg_pressure[i] - sched_class_regs_num[cl]);
1755       after = MAX (0, max_reg_pressure[i] + change
1756 		   - sched_class_regs_num[cl]);
1757       hard_regno = ira_class_hard_regs[cl][0];
1758       gcc_assert (hard_regno >= 0);
1759       mode = reg_raw_mode[hard_regno];
1760       excess_cost_change += ((after - before)
1761 			     * (ira_memory_move_cost[mode][cl][0]
1762 				+ ira_memory_move_cost[mode][cl][1]));
1763     }
1764   INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
1765 }
1766 
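/* Illustrative sketch (hypothetical values): the contribution of one
   pressure class to INSN_REG_PRESSURE_EXCESS_COST_CHANGE, as computed
   above.  Only pressure in excess of the class's register count is
   costed, weighted by the cost of moving a value to and from memory.  */

static int
sketch_excess_cost_change (int max_pressure, int nregs, int set_increase,
			   int deaths, int mem_store_cost, int mem_load_cost)
{
  int change = set_increase - deaths;
  int before = max_pressure - nregs;
  int after = max_pressure + change - nregs;

  if (before < 0)
    before = 0;
  if (after < 0)
    after = 0;

  return (after - before) * (mem_store_cost + mem_load_cost);
}

/* E.g. a class with 4 registers, maximum pressure 5, and an insn that
   sets one register and kills none: before = 1, after = 2, so this
   insn costs one extra store/load pair.  */
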
1767 /* This is the first page of code related to SCHED_PRESSURE_MODEL.
1768    It tries to make the scheduler take register pressure into account
1769    without introducing too many unnecessary stalls.  It hooks into the
1770    main scheduling algorithm at several points:
1771 
1772     - Before scheduling starts, model_start_schedule constructs a
1773       "model schedule" for the current block.  This model schedule is
1774       chosen solely to keep register pressure down.  It does not take the
1775       target's pipeline or the original instruction order into account,
1776       except as a tie-breaker.  It also doesn't work to a particular
1777       pressure limit.
1778 
1779       This model schedule gives us an idea of what pressure can be
1780       achieved for the block and gives us an example of a schedule that
1781       keeps to that pressure.  It also makes the final schedule less
1782       dependent on the original instruction order.  This is important
1783       because the original order can either be "wide" (many values live
1784       at once, such as in user-scheduled code) or "narrow" (few values
1785       live at once, such as after loop unrolling, where several
1786       iterations are executed sequentially).
1787 
1788       We do not apply this model schedule to the rtx stream.  We simply
1789       record it in model_schedule.  We also compute the maximum pressure,
1790       MP, that was seen during this schedule.
1791 
1792     - Instructions are added to the ready queue even if they require
1793       a stall.  The length of the stall is instead computed as:
1794 
1795 	 MAX (INSN_TICK (INSN) - clock_var, 0)
1796 
1797       (= insn_delay).  This allows rank_for_schedule to choose between
1798       introducing a deliberate stall or increasing pressure.
1799 
1800     - Before sorting the ready queue, model_set_excess_costs assigns
1801       a pressure-based cost to each ready instruction in the queue.
1802       This is the instruction's INSN_REG_PRESSURE_EXCESS_COST_CHANGE
1803       (ECC for short) and is effectively measured in cycles.
1804 
1805     - rank_for_schedule ranks instructions based on:
1806 
1807 	ECC (insn) + insn_delay (insn)
1808 
1809       then as:
1810 
1811 	insn_delay (insn)
1812 
1813       So, for example, an instruction X1 with an ECC of 1 that can issue
1814       now will win over an instruction X0 with an ECC of zero that would
1815       introduce a stall of one cycle.  However, an instruction X2 with an
1816       ECC of 2 that can issue now will lose to both X0 and X1.
1817 
1818     - When an instruction is scheduled, model_recompute updates the model
1819       schedule with the new pressures (some of which might now exceed the
1820       original maximum pressure MP).  model_update_limit_points then searches
1821       for the new point of maximum pressure, if not already known.  */
1822 
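/* Illustrative sketch (hypothetical names): the two-level comparison
   described above.  An insn's deliberate stall is
   MAX (INSN_TICK (insn) - clock_var, 0), and the primary key is
   ECC + stall, with the stall alone as tie-breaker.  */

static int
sketch_model_rank (int ecc1, int tick1, int ecc2, int tick2, int clock)
{
  int delay1 = tick1 > clock ? tick1 - clock : 0;
  int delay2 = tick2 > clock ? tick2 - clock : 0;

  if (ecc1 + delay1 != ecc2 + delay2)
    return (ecc1 + delay1) - (ecc2 + delay2);	/* Smaller sum wins.  */
  return delay1 - delay2;			/* Then smaller stall.  */
}

/* With the example above: X1 (ECC 1, no stall) ties X0 (ECC 0,
   one-cycle stall) on the first key and wins on the second, while
   X2 (ECC 2, no stall) loses to both on the first key.  */
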
1823 /* Used to separate high-verbosity debug information for SCHED_PRESSURE_MODEL
1824    from surrounding debug information.  */
1825 #define MODEL_BAR \
1826   ";;\t\t+------------------------------------------------------\n"
1827 
1828 /* Information about the pressure on a particular register class at a
1829    particular point of the model schedule.  */
1830 struct model_pressure_data {
1831   /* The pressure at this point of the model schedule, or -1 if the
1832      point is associated with an instruction that has already been
1833      scheduled.  */
1834   int ref_pressure;
1835 
1836   /* The maximum pressure during or after this point of the model schedule.  */
1837   int max_pressure;
1838 };
1839 
1840 /* Per-instruction information that is used while building the model
1841    schedule.  Here, "schedule" refers to the model schedule rather
1842    than the main schedule.  */
1843 struct model_insn_info {
1844   /* The instruction itself.  */
1845   rtx_insn *insn;
1846 
1847   /* If this instruction is in model_worklist, these fields link to the
1848      previous (higher-priority) and next (lower-priority) instructions
1849      in the list.  */
1850   struct model_insn_info *prev;
1851   struct model_insn_info *next;
1852 
1853   /* While constructing the schedule, QUEUE_INDEX describes whether an
1854      instruction has already been added to the schedule (QUEUE_SCHEDULED),
1855      is in model_worklist (QUEUE_READY), or neither (QUEUE_NOWHERE).
1856      old_queue records the value that QUEUE_INDEX had before scheduling
1857      started, so that we can restore it once the schedule is complete.  */
1858   int old_queue;
1859 
1860   /* The relative importance of an unscheduled instruction.  Higher
1861      values indicate greater importance.  */
1862   unsigned int model_priority;
1863 
1864   /* The length of the longest path of satisfied true dependencies
1865      that leads to this instruction.  */
1866   unsigned int depth;
1867 
1868   /* The length of the longest path of dependencies of any kind
1869      that leads from this instruction.  */
1870   unsigned int alap;
1871 
1872   /* The number of predecessor nodes that must still be scheduled.  */
1873   int unscheduled_preds;
1874 };
1875 
1876 /* Information about the pressure limit for a particular register class.
1877    This structure is used when applying a model schedule to the main
1878    schedule.  */
1879 struct model_pressure_limit {
1880   /* The maximum register pressure seen in the original model schedule.  */
1881   int orig_pressure;
1882 
1883   /* The maximum register pressure seen in the current model schedule
1884      (which excludes instructions that have already been scheduled).  */
1885   int pressure;
1886 
1887   /* The point of the current model schedule at which PRESSURE is first
1888      reached.  It is set to -1 if the value needs to be recomputed.  */
1889   int point;
1890 };
1891 
1892 /* Describes a particular way of measuring register pressure.  */
1893 struct model_pressure_group {
1894   /* Index PCI describes the maximum pressure on ira_pressure_classes[PCI].  */
1895   struct model_pressure_limit limits[N_REG_CLASSES];
1896 
1897   /* Index (POINT * ira_num_pressure_classes + PCI) describes the pressure
1898      on register class ira_pressure_classes[PCI] at point POINT of the
1899      current model schedule.  A POINT of model_num_insns describes the
1900      pressure at the end of the schedule.  */
1901   struct model_pressure_data *model;
1902 };
1903 
1904 /* Index POINT gives the instruction at point POINT of the model schedule.
1905    This array doesn't change during main scheduling.  */
1906 static vec<rtx_insn *> model_schedule;
1907 
1908 /* The list of instructions in the model worklist, sorted in order of
1909    decreasing priority.  */
1910 static struct model_insn_info *model_worklist;
1911 
1912 /* Index I describes the instruction with INSN_LUID I.  */
1913 static struct model_insn_info *model_insns;
1914 
1915 /* The number of instructions in the model schedule.  */
1916 static int model_num_insns;
1917 
1918 /* The index of the first instruction in model_schedule that hasn't yet been
1919    added to the main schedule, or model_num_insns if all of them have.  */
1920 static int model_curr_point;
1921 
1922 /* Describes the pressure before each instruction in the model schedule.  */
1923 static struct model_pressure_group model_before_pressure;
1924 
1925 /* The first unused model_priority value (as used in model_insn_info).  */
1926 static unsigned int model_next_priority;
1927 
1928 
1929 /* The model_pressure_data for ira_pressure_classes[PCI] in GROUP
1930    at point POINT of the model schedule.  */
1931 #define MODEL_PRESSURE_DATA(GROUP, POINT, PCI) \
1932   (&(GROUP)->model[(POINT) * ira_pressure_classes_num + (PCI)])
1933 
1934 /* The maximum pressure on ira_pressure_classes[PCI] in GROUP at or
1935    after point POINT of the model schedule.  */
1936 #define MODEL_MAX_PRESSURE(GROUP, POINT, PCI) \
1937   (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->max_pressure)
1938 
1939 /* The pressure on ira_pressure_classes[PCI] in GROUP at point POINT
1940    of the model schedule.  */
1941 #define MODEL_REF_PRESSURE(GROUP, POINT, PCI) \
1942   (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->ref_pressure)
1943 
1944 /* Information about INSN that is used when creating the model schedule.  */
1945 #define MODEL_INSN_INFO(INSN) \
1946   (&model_insns[INSN_LUID (INSN)])
1947 
1948 /* The instruction at point POINT of the model schedule.  */
1949 #define MODEL_INSN(POINT) \
1950   (model_schedule[POINT])
1951 
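/* Illustrative sketch: the addressing scheme behind the macros above.
   The (point, class) grid is flattened row-major into GROUP->model,
   one row per schedule point plus an extra row (POINT ==
   model_num_insns) for the end of the schedule.  */

static inline struct model_pressure_data *
sketch_pressure_cell (struct model_pressure_data *model, int nclasses,
		      int point, int pci)
{
  return &model[point * nclasses + pci];
}
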
1952 
1953 /* Return INSN's index in the model schedule, or model_num_insns if it
1954    doesn't belong to that schedule.  */
1955 
1956 static int
1957 model_index (rtx_insn *insn)
1958 {
1959   if (INSN_MODEL_INDEX (insn) == 0)
1960     return model_num_insns;
1961   return INSN_MODEL_INDEX (insn) - 1;
1962 }
1963 
1964 /* Make sure that GROUP->limits is up-to-date for the current point
1965    of the model schedule.  */
1966 
1967 static void
1968 model_update_limit_points_in_group (struct model_pressure_group *group)
1969 {
1970   int pci, max_pressure, point;
1971 
1972   for (pci = 0; pci < ira_pressure_classes_num; pci++)
1973     {
1974       /* We may have passed the final point at which the pressure in
1975 	 group->limits[pci].pressure was reached.  Update the limit if so.  */
1976       max_pressure = MODEL_MAX_PRESSURE (group, model_curr_point, pci);
1977       group->limits[pci].pressure = max_pressure;
1978 
1979       /* Find the point at which MAX_PRESSURE is first reached.  We need
1980 	 to search in three cases:
1981 
1982 	 - We've already moved past the previous pressure point.
1983 	   In this case we search forward from model_curr_point.
1984 
1985 	 - We scheduled the previous point of maximum pressure ahead of
1986 	   its position in the model schedule, but doing so didn't bring
1987 	   the pressure point earlier.  In this case we search forward
1988 	   from that previous pressure point.
1989 
1990 	 - Scheduling an instruction early caused the maximum pressure
1991 	   to decrease.  In this case we will have set the pressure
1992 	   point to -1, and we search forward from model_curr_point.  */
1993       point = MAX (group->limits[pci].point, model_curr_point);
1994       while (point < model_num_insns
1995 	     && MODEL_REF_PRESSURE (group, point, pci) < max_pressure)
1996 	point++;
1997       group->limits[pci].point = point;
1998 
1999       gcc_assert (MODEL_REF_PRESSURE (group, point, pci) == max_pressure);
2000       gcc_assert (MODEL_MAX_PRESSURE (group, point, pci) == max_pressure);
2001     }
2002 }
2003 
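/* Illustrative sketch (hypothetical names): the forward scan in
   model_update_limit_points_in_group above.  Starting from a known
   lower bound, advance to the first point whose reference pressure
   reaches the maximum; points that have already been scheduled store
   -1 and are skipped naturally.  */

static int
sketch_first_max_point (const int *ref_pressure, int npoints,
			int start, int max_pressure)
{
  int point = start;
  while (point < npoints && ref_pressure[point] < max_pressure)
    point++;
  return point;
}
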
2004 /* Make sure that all register-pressure limits are up-to-date for the
2005    current position in the model schedule.  */
2006 
2007 static void
2008 model_update_limit_points (void)
2009 {
2010   model_update_limit_points_in_group (&model_before_pressure);
2011 }
2012 
2013 /* Return the model_index of the last unscheduled use in chain USE
2014    outside of USE's instruction.  Return -1 if there are no other uses,
2015    or model_num_insns if the register is live at the end of the block.  */
2016 
2017 static int
2018 model_last_use_except (struct reg_use_data *use)
2019 {
2020   struct reg_use_data *next;
2021   int last, index;
2022 
2023   last = -1;
2024   for (next = use->next_regno_use; next != use; next = next->next_regno_use)
2025     if (NONDEBUG_INSN_P (next->insn)
2026 	&& QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
2027       {
2028 	index = model_index (next->insn);
2029 	if (index == model_num_insns)
2030 	  return model_num_insns;
2031 	if (last < index)
2032 	  last = index;
2033       }
2034   return last;
2035 }
2036 
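/* Illustrative sketch (hypothetical types): model_last_use_except
   above walks a circular chain of uses of one register number.  The
   same traversal over a toy circular list, skipping the starting
   node:  */

struct sketch_use { int model_index; struct sketch_use *next; };

static int
sketch_last_other_use (const struct sketch_use *use)
{
  int last = -1;
  const struct sketch_use *n;

  /* The chain is circular, so stop on wrapping back to USE.  */
  for (n = use->next; n != use; n = n->next)
    if (n->model_index > last)
      last = n->model_index;
  return last;
}
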
2037 /* An instruction with model_index POINT has just been scheduled, and it
2038    adds DELTA to the pressure on ira_pressure_classes[PCI] after POINT - 1.
2039    Update MODEL_REF_PRESSURE (GROUP, POINT, PCI) and
2040    MODEL_MAX_PRESSURE (GROUP, POINT, PCI) accordingly.  */
2041 
2042 static void
2043 model_start_update_pressure (struct model_pressure_group *group,
2044 			     int point, int pci, int delta)
2045 {
2046   int next_max_pressure;
2047 
2048   if (point == model_num_insns)
2049     {
2050       /* The instruction wasn't part of the model schedule; it was moved
2051 	 from a different block.  Update the pressure for the end of
2052 	 the model schedule.  */
2053       MODEL_REF_PRESSURE (group, point, pci) += delta;
2054       MODEL_MAX_PRESSURE (group, point, pci) += delta;
2055     }
2056   else
2057     {
2058       /* Record that this instruction has been scheduled.  Nothing now
2059 	 changes between POINT and POINT + 1, so get the maximum pressure
2060 	 from the latter.  If the maximum pressure decreases, the new
2061 	 pressure point may be before POINT.  */
2062       MODEL_REF_PRESSURE (group, point, pci) = -1;
2063       next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2064       if (MODEL_MAX_PRESSURE (group, point, pci) > next_max_pressure)
2065 	{
2066 	  MODEL_MAX_PRESSURE (group, point, pci) = next_max_pressure;
2067 	  if (group->limits[pci].point == point)
2068 	    group->limits[pci].point = -1;
2069 	}
2070     }
2071 }
2072 
2073 /* Record that scheduling a later instruction has changed the pressure
2074    at point POINT of the model schedule by DELTA (which might be 0).
2075    Update GROUP accordingly.  Return nonzero if these changes might
2076    trigger changes to previous points as well.  */
2077 
2078 static int
2079 model_update_pressure (struct model_pressure_group *group,
2080 		       int point, int pci, int delta)
2081 {
2082   int ref_pressure, max_pressure, next_max_pressure;
2083 
2084   /* If POINT hasn't yet been scheduled, update its pressure.  */
2085   ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
2086   if (ref_pressure >= 0 && delta != 0)
2087     {
2088       ref_pressure += delta;
2089       MODEL_REF_PRESSURE (group, point, pci) = ref_pressure;
2090 
2091       /* Check whether the maximum pressure in the overall schedule
2092 	 has increased.  (This means that the MODEL_MAX_PRESSURE of
2093 	 every point <= POINT will need to increase too; see below.)  */
2094       if (group->limits[pci].pressure < ref_pressure)
2095 	group->limits[pci].pressure = ref_pressure;
2096 
2097       /* If we are at maximum pressure, and the maximum pressure
2098 	 point was previously unknown or later than POINT,
2099 	 bring it forward.  */
2100       if (group->limits[pci].pressure == ref_pressure
2101 	  && !IN_RANGE (group->limits[pci].point, 0, point))
2102 	group->limits[pci].point = point;
2103 
2104       /* If POINT used to be the point of maximum pressure, but isn't
2105 	 any longer, we need to recalculate it using a forward walk.  */
2106       if (group->limits[pci].pressure > ref_pressure
2107 	  && group->limits[pci].point == point)
2108 	group->limits[pci].point = -1;
2109     }
2110 
2111   /* Update the maximum pressure at POINT.  Changes here might also
2112      affect the maximum pressure at POINT - 1.  */
2113   next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2114   max_pressure = MAX (ref_pressure, next_max_pressure);
2115   if (MODEL_MAX_PRESSURE (group, point, pci) != max_pressure)
2116     {
2117       MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
2118       return 1;
2119     }
2120   return 0;
2121 }
2122 
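/* Illustrative sketch (hypothetical names): the backward propagation
   that callers of model_update_pressure rely on.  MAX[POINT] is the
   maximum of REF[POINT] and everything after it, so a change only
   needs to keep walking backwards while it actually alters the stored
   maxima -- exactly the "return nonzero" contract above.  */

static void
sketch_propagate_max (const int *ref, int *max, int point)
{
  while (point >= 0)
    {
      int m = ref[point] > max[point + 1] ? ref[point] : max[point + 1];
      if (max[point] == m)
	break;	/* Earlier points are already consistent.  */
      max[point] = m;
      point--;
    }
}
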
2123 /* INSN has just been scheduled.  Update the model schedule accordingly.  */
2124 
2125 static void
2126 model_recompute (rtx_insn *insn)
2127 {
2128   struct {
2129     int last_use;
2130     int regno;
2131   } uses[FIRST_PSEUDO_REGISTER + MAX_RECOG_OPERANDS];
2132   struct reg_use_data *use;
2133   struct reg_pressure_data *reg_pressure;
2134   int delta[N_REG_CLASSES];
2135   int pci, point, mix, new_last, cl, ref_pressure, queue;
2136   unsigned int i, num_uses, num_pending_births;
2137   bool print_p;
2138 
2139   /* The destinations of INSN were previously live from POINT onwards, but are
2140      now live from model_curr_point onwards.  Set up DELTA accordingly.  */
2141   point = model_index (insn);
2142   reg_pressure = INSN_REG_PRESSURE (insn);
2143   for (pci = 0; pci < ira_pressure_classes_num; pci++)
2144     {
2145       cl = ira_pressure_classes[pci];
2146       delta[cl] = reg_pressure[pci].set_increase;
2147     }
2148 
2149   /* Record which registers previously died at POINT, but which now die
2150      before POINT.  Adjust DELTA so that it represents the effect of
2151      this change after POINT - 1.  Set NUM_PENDING_BIRTHS to the number of
2152      registers that will be born in the range [model_curr_point, POINT).  */
2153   num_uses = 0;
2154   num_pending_births = 0;
2155   for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2156     {
2157       new_last = model_last_use_except (use);
2158       if (new_last < point)
2159 	{
2160 	  gcc_assert (num_uses < ARRAY_SIZE (uses));
2161 	  uses[num_uses].last_use = new_last;
2162 	  uses[num_uses].regno = use->regno;
2163 	  /* This register is no longer live after POINT - 1.  */
2164 	  mark_regno_birth_or_death (NULL, delta, use->regno, false);
2165 	  num_uses++;
2166 	  if (new_last >= 0)
2167 	    num_pending_births++;
2168 	}
2169     }
2170 
2171   /* Update the MODEL_REF_PRESSURE and MODEL_MAX_PRESSURE for POINT.
2172      Also set each group pressure limit for POINT.  */
2173   for (pci = 0; pci < ira_pressure_classes_num; pci++)
2174     {
2175       cl = ira_pressure_classes[pci];
2176       model_start_update_pressure (&model_before_pressure,
2177 				   point, pci, delta[cl]);
2178     }
2179 
2180   /* Walk the model schedule backwards, starting immediately before POINT.  */
2181   print_p = false;
2182   if (point != model_curr_point)
2183     do
2184       {
2185 	point--;
2186 	insn = MODEL_INSN (point);
2187 	queue = QUEUE_INDEX (insn);
2188 
2189 	if (queue != QUEUE_SCHEDULED)
2190 	  {
2191 	    /* DELTA describes the effect of the move on the register pressure
2192 	       after POINT.  Make it describe the effect on the pressure
2193 	       before POINT.  */
2194 	    i = 0;
2195 	    while (i < num_uses)
2196 	      {
2197 		if (uses[i].last_use == point)
2198 		  {
2199 		    /* This register is now live again.  */
2200 		    mark_regno_birth_or_death (NULL, delta,
2201 					       uses[i].regno, true);
2202 
2203 		    /* Remove this use from the array.  */
2204 		    uses[i] = uses[num_uses - 1];
2205 		    num_uses--;
2206 		    num_pending_births--;
2207 		  }
2208 		else
2209 		  i++;
2210 	      }
2211 
2212 	    if (sched_verbose >= 5)
2213 	      {
2214 		if (!print_p)
2215 		  {
2216 		    fprintf (sched_dump, MODEL_BAR);
2217 		    fprintf (sched_dump, ";;\t\t| New pressure for model"
2218 			     " schedule\n");
2219 		    fprintf (sched_dump, MODEL_BAR);
2220 		    print_p = true;
2221 		  }
2222 
2223 		fprintf (sched_dump, ";;\t\t| %3d %4d %-30s ",
2224 			 point, INSN_UID (insn),
2225 			 str_pattern_slim (PATTERN (insn)));
2226 		for (pci = 0; pci < ira_pressure_classes_num; pci++)
2227 		  {
2228 		    cl = ira_pressure_classes[pci];
2229 		    ref_pressure = MODEL_REF_PRESSURE (&model_before_pressure,
2230 						       point, pci);
2231 		    fprintf (sched_dump, " %s:[%d->%d]",
2232 			     reg_class_names[ira_pressure_classes[pci]],
2233 			     ref_pressure, ref_pressure + delta[cl]);
2234 		  }
2235 		fprintf (sched_dump, "\n");
2236 	      }
2237 	  }
2238 
2239 	/* Adjust the pressure at POINT.  Set MIX to nonzero if POINT - 1
2240 	   might have changed as well.  */
2241 	mix = num_pending_births;
2242 	for (pci = 0; pci < ira_pressure_classes_num; pci++)
2243 	  {
2244 	    cl = ira_pressure_classes[pci];
2245 	    mix |= delta[cl];
2246 	    mix |= model_update_pressure (&model_before_pressure,
2247 					  point, pci, delta[cl]);
2248 	  }
2249       }
2250     while (mix && point > model_curr_point);
2251 
2252   if (print_p)
2253     fprintf (sched_dump, MODEL_BAR);
2254 }
2255 
2256 /* After DEP, which was cancelled, has been resolved for insn NEXT,
2257    check whether the insn's pattern needs restoring.  */
2258 static bool
2259 must_restore_pattern_p (rtx_insn *next, dep_t dep)
2260 {
2261   if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
2262     return false;
2263 
2264   if (DEP_TYPE (dep) == REG_DEP_CONTROL)
2265     {
2266       gcc_assert (ORIG_PAT (next) != NULL_RTX);
2267       gcc_assert (next == DEP_CON (dep));
2268     }
2269   else
2270     {
2271       struct dep_replacement *desc = DEP_REPLACE (dep);
2272       if (desc->insn != next)
2273 	{
2274 	  gcc_assert (*desc->loc == desc->orig);
2275 	  return false;
2276 	}
2277     }
2278   return true;
2279 }
2280 
2281 /* model_spill_cost (CL, P, P') returns the cost of increasing the
2282    pressure on CL from P to P'.  We use this to calculate a "base ECC",
2283    baseECC (CL, X), for each pressure class CL and each instruction X.
2284    Supposing X changes the pressure on CL from P to P', and that the
2285    maximum pressure on CL in the current model schedule is MP', then:
2286 
2287    * if X occurs before or at the next point of maximum pressure in
2288      the model schedule and P' > MP', then:
2289 
2290        baseECC (CL, X) = model_spill_cost (CL, MP, P')
2291 
2292      The idea is that the pressure after scheduling a fixed set of
2293      instructions -- in this case, the set up to and including the
2294      next maximum pressure point -- is going to be the same regardless
2295      of the order; we simply want to keep the intermediate pressure
2296      under control.  Thus X has a cost of zero unless scheduling it
2297      now would exceed MP'.
2298 
2299      If all increases in the set are by the same amount, no zero-cost
2300      instruction will ever cause the pressure to exceed MP'.  However,
2301      if X is instead moved past an instruction X' with pressure in the
2302      range (MP' - (P' - P), MP'), the pressure at X' will increase
2303      beyond MP'.  Since baseECC is very much a heuristic anyway,
2304      it doesn't seem worth the overhead of tracking cases like these.
2305 
2306      The cost of exceeding MP' is always based on the original maximum
2307      pressure MP.  This is so that going 2 registers over the original
2308      limit has the same cost regardless of whether it comes from two
2309      separate +1 deltas or from a single +2 delta.
2310 
2311    * if X occurs after the next point of maximum pressure in the model
2312      schedule and P' > P, then:
2313 
2314        baseECC (CL, X) = model_spill_cost (CL, MP, MP' + (P' - P))
2315 
2316      That is, if we move X forward across a point of maximum pressure,
2317      and if X increases the pressure by P' - P, then we conservatively
2318      assume that scheduling X next would increase the maximum pressure
2319      by P' - P.  Again, the cost of doing this is based on the original
2320      maximum pressure MP, for the same reason as above.
2321 
2322    * if P' < P, P > MP, and X occurs at or after the next point of
2323      maximum pressure, then:
2324 
2325        baseECC (CL, X) = -model_spill_cost (CL, MAX (MP, P'), P)
2326 
2327      That is, if we have already exceeded the original maximum pressure MP,
2328      and if X might reduce the maximum pressure again -- or at least push
2329      it further back, and thus allow more scheduling freedom -- it is given
2330      a negative cost to reflect the improvement.
2331 
2332    * otherwise,
2333 
2334        baseECC (CL, X) = 0
2335 
2336      In this case, X is not expected to affect the maximum pressure MP',
2337      so it has zero cost.
2338 
2339    We then create a combined value baseECC (X) that is the sum of
2340    baseECC (CL, X) for each pressure class CL.
2341 
2342    baseECC (X) could itself be used as the ECC value described above.
2343    However, this is often too conservative, in the sense that it
2344    tends to make high-priority instructions that increase pressure
2345    wait too long in cases where introducing a spill would be better.
2346    For this reason the final ECC is a priority-adjusted form of
2347    baseECC (X).  Specifically, we calculate:
2348 
2349      P (X) = INSN_PRIORITY (X) - insn_delay (X) - baseECC (X)
2350      baseP = MAX { P (X) | baseECC (X) <= 0 }
2351 
2352    Then:
2353 
2354      ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0)
2355 
2356    Thus an instruction's effect on pressure is ignored if it has a high
2357    enough priority relative to the ones that don't increase pressure.
2358    Negative values of baseECC (X) do not increase the priority of X
2359    itself, but they do make it harder for other instructions to
2360    increase the pressure further.
2361 
2362    This pressure cost is deliberately timid.  The intention has been
2363    to choose a heuristic that rarely interferes with the normal list
2364    scheduler in cases where that scheduler would produce good code.
2365    We simply want to curb some of its worst excesses.  */
2366 
2367 /* Return the cost of increasing the pressure in class CL from FROM to TO.
2368 
2369    Here we use the very simplistic cost model that every register above
2370    sched_class_regs_num[CL] has a spill cost of 1.  We could use other
2371    measures instead, such as one based on MEMORY_MOVE_COST.  However:
2372 
2373       (1) In order for an instruction to be scheduled, the higher cost
2374 	  would need to be justified in a single saving of that many stalls.
2375 	  This is overly pessimistic, because the benefit of spilling is
2376 	  often to avoid a sequence of several short stalls rather than
2377 	  a single long one.
2378 
2379       (2) The cost is still arbitrary.  Because we are not allocating
2380 	  registers during scheduling, we have no way of knowing for
2381 	  sure how many memory accesses will be required by each spill,
2382 	  where the spills will be placed within the block, or even
2383 	  which block(s) will contain the spills.
2384 
2385    So a higher cost than 1 is often too conservative in practice,
2386    forcing blocks to contain unnecessary stalls instead of spill code.
2387    The simple cost below seems to be the best compromise.  It reduces
2388    the interference with the normal list scheduler, which helps make
2389    it more suitable for a default-on option.  */
2390 
2391 static int
2392 model_spill_cost (int cl, int from, int to)
2393 {
2394   from = MAX (from, sched_class_regs_num[cl]);
2395   return MAX (to, from) - from;
2396 }
2397 
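/* Worked example for model_spill_cost above, assuming a class with 4
   allocatable registers (sched_class_regs_num[cl] == 4): raising the
   pressure from 3 to 6 first clamps FROM to 4, since pressure up to
   the register count is free, and then charges one unit per excess
   register: MAX (6, 4) - 4 == 2.  Lowering the pressure (TO <= FROM)
   costs nothing.  */
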
2398 /* Return baseECC (ira_pressure_classes[PCI], POINT), given that
2399    P = curr_reg_pressure[ira_pressure_classes[PCI]] and that
2400    P' = P + DELTA.  */
2401 
2402 static int
2403 model_excess_group_cost (struct model_pressure_group *group,
2404 			 int point, int pci, int delta)
2405 {
2406   int pressure, cl;
2407 
2408   cl = ira_pressure_classes[pci];
2409   if (delta < 0 && point >= group->limits[pci].point)
2410     {
2411       pressure = MAX (group->limits[pci].orig_pressure,
2412 		      curr_reg_pressure[cl] + delta);
2413       return -model_spill_cost (cl, pressure, curr_reg_pressure[cl]);
2414     }
2415 
2416   if (delta > 0)
2417     {
2418       if (point > group->limits[pci].point)
2419 	pressure = group->limits[pci].pressure + delta;
2420       else
2421 	pressure = curr_reg_pressure[cl] + delta;
2422 
2423       if (pressure > group->limits[pci].pressure)
2424 	return model_spill_cost (cl, group->limits[pci].orig_pressure,
2425 				 pressure);
2426     }
2427 
2428   return 0;
2429 }
2430 
2431 /* Return baseECC (MODEL_INSN (INSN)).  Dump the costs to sched_dump
2432    if PRINT_P.  */
2433 
2434 static int
2435 model_excess_cost (rtx_insn *insn, bool print_p)
2436 {
2437   int point, pci, cl, cost, this_cost, delta;
2438   struct reg_pressure_data *insn_reg_pressure;
2439   int insn_death[N_REG_CLASSES];
2440 
2441   calculate_reg_deaths (insn, insn_death);
2442   point = model_index (insn);
2443   insn_reg_pressure = INSN_REG_PRESSURE (insn);
2444   cost = 0;
2445 
2446   if (print_p)
2447     fprintf (sched_dump, ";;\t\t| %3d %4d | %4d %+3d |", point,
2448 	     INSN_UID (insn), INSN_PRIORITY (insn), insn_delay (insn));
2449 
2450   /* Sum up the individual costs for each register class.  */
2451   for (pci = 0; pci < ira_pressure_classes_num; pci++)
2452     {
2453       cl = ira_pressure_classes[pci];
2454       delta = insn_reg_pressure[pci].set_increase - insn_death[cl];
2455       this_cost = model_excess_group_cost (&model_before_pressure,
2456 					   point, pci, delta);
2457       cost += this_cost;
2458       if (print_p)
2459 	fprintf (sched_dump, " %s:[%d base cost %d]",
2460 		 reg_class_names[cl], delta, this_cost);
2461     }
2462 
2463   if (print_p)
2464     fprintf (sched_dump, "\n");
2465 
2466   return cost;
2467 }
2468 
2469 /* Dump the next points of maximum pressure for GROUP.  */
2470 
2471 static void
2472 model_dump_pressure_points (struct model_pressure_group *group)
2473 {
2474   int pci, cl;
2475 
2476   fprintf (sched_dump, ";;\t\t|  pressure points");
2477   for (pci = 0; pci < ira_pressure_classes_num; pci++)
2478     {
2479       cl = ira_pressure_classes[pci];
2480       fprintf (sched_dump, " %s:[%d->%d at ", reg_class_names[cl],
2481 	       curr_reg_pressure[cl], group->limits[pci].pressure);
2482       if (group->limits[pci].point < model_num_insns)
2483 	fprintf (sched_dump, "%d:%d]", group->limits[pci].point,
2484 		 INSN_UID (MODEL_INSN (group->limits[pci].point)));
2485       else
2486 	fprintf (sched_dump, "end]");
2487     }
2488   fprintf (sched_dump, "\n");
2489 }
2490 
2491 /* Set INSN_REG_PRESSURE_EXCESS_COST_CHANGE for INSNS[0...COUNT-1].  */
2492 
2493 static void
2494 model_set_excess_costs (rtx_insn **insns, int count)
2495 {
2496   int i, cost, priority_base, priority;
2497   bool print_p;
2498 
2499   /* Record the baseECC value for each instruction in the model schedule,
2500      except that negative costs are converted to zero ones now rather than
2501      later.  Do not assign a cost to debug instructions, since they must
2502      not change code-generation decisions.  Experiments suggest we also
2503      get better results by not assigning a cost to instructions from
2504      a different block.
2505 
2506      Set PRIORITY_BASE to baseP in the block comment above.  This is the
2507      maximum priority of the "cheap" instructions, which should always
2508      include the next model instruction.  */
2509   priority_base = 0;
2510   print_p = false;
2511   for (i = 0; i < count; i++)
2512     if (INSN_MODEL_INDEX (insns[i]))
2513       {
2514 	if (sched_verbose >= 6 && !print_p)
2515 	  {
2516 	    fprintf (sched_dump, MODEL_BAR);
2517 	    fprintf (sched_dump, ";;\t\t| Pressure costs for ready queue\n");
2518 	    model_dump_pressure_points (&model_before_pressure);
2519 	    fprintf (sched_dump, MODEL_BAR);
2520 	    print_p = true;
2521 	  }
2522 	cost = model_excess_cost (insns[i], print_p);
2523 	if (cost <= 0)
2524 	  {
2525 	    priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]) - cost;
2526 	    priority_base = MAX (priority_base, priority);
2527 	    cost = 0;
2528 	  }
2529 	INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = cost;
2530       }
2531   if (print_p)
2532     fprintf (sched_dump, MODEL_BAR);
2533 
2534   /* Use MAX (baseECC, 0) and baseP to calculate ECC for each
2535      instruction.  */
2536   for (i = 0; i < count; i++)
2537     {
2538       cost = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]);
2539       priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]);
2540       if (cost > 0 && priority > priority_base)
2541 	{
2542 	  cost += priority_base - priority;
2543 	  INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = MAX (cost, 0);
2544 	}
2545     }
2546 }
2547 
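/* Illustrative sketch (hypothetical names): the adjustment made by the
   second loop above.  With P (X) = INSN_PRIORITY (X) - insn_delay (X),
   instructions whose P exceeds baseP have their pressure cost forgiven
   by the difference, down to a floor of zero.  */

static int
sketch_adjusted_ecc (int base_ecc, int insn_priority, int delay, int base_p)
{
  int p = insn_priority - delay;

  if (base_ecc > 0 && p > base_p)
    {
      base_ecc += base_p - p;
      if (base_ecc < 0)
	base_ecc = 0;
    }
  return base_ecc;
}
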
2548 
2549 /* Enum of rank_for_schedule heuristic decisions.  */
2550 enum rfs_decision {
2551   RFS_LIVE_RANGE_SHRINK1, RFS_LIVE_RANGE_SHRINK2,
2552   RFS_SCHED_GROUP, RFS_PRESSURE_DELAY, RFS_PRESSURE_TICK,
2553   RFS_FEEDS_BACKTRACK_INSN, RFS_PRIORITY, RFS_SPECULATION,
2554   RFS_SCHED_RANK, RFS_LAST_INSN, RFS_PRESSURE_INDEX,
2555   RFS_DEP_COUNT, RFS_TIE, RFS_FUSION, RFS_N };
2556 
2557 /* Corresponding strings for print outs.  */
2558 static const char *rfs_str[RFS_N] = {
2559   "RFS_LIVE_RANGE_SHRINK1", "RFS_LIVE_RANGE_SHRINK2",
2560   "RFS_SCHED_GROUP", "RFS_PRESSURE_DELAY", "RFS_PRESSURE_TICK",
2561   "RFS_FEEDS_BACKTRACK_INSN", "RFS_PRIORITY", "RFS_SPECULATION",
2562   "RFS_SCHED_RANK", "RFS_LAST_INSN", "RFS_PRESSURE_INDEX",
2563   "RFS_DEP_COUNT", "RFS_TIE", "RFS_FUSION" };
2564 
2565 /* Statistical breakdown of rank_for_schedule decisions.  */
2566 struct rank_for_schedule_stats_t { unsigned stats[RFS_N]; };
2567 static rank_for_schedule_stats_t rank_for_schedule_stats;
2568 
2569 /* Return the result of comparing insns TMP and TMP2 and update
2570    rank_for_schedule statistics.  */
2571 static int
2572 rfs_result (enum rfs_decision decision, int result, rtx tmp, rtx tmp2)
2573 {
2574   ++rank_for_schedule_stats.stats[decision];
2575   if (result < 0)
2576     INSN_LAST_RFS_WIN (tmp) = decision;
2577   else if (result > 0)
2578     INSN_LAST_RFS_WIN (tmp2) = decision;
2579   else
2580     gcc_unreachable ();
2581   return result;
2582 }
2583 
2584 /* Sorting predicate to move DEBUG_INSNs to the top of ready list, while
2585    keeping normal insns in original order.  */
2586 
2587 static int
2588 rank_for_schedule_debug (const void *x, const void *y)
2589 {
2590   rtx_insn *tmp = *(rtx_insn * const *) y;
2591   rtx_insn *tmp2 = *(rtx_insn * const *) x;
2592 
2593   /* Schedule debug insns as early as possible.  */
2594   if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
2595     return -1;
2596   else if (!DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2597     return 1;
2598   else if (DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2599     return INSN_LUID (tmp) - INSN_LUID (tmp2);
2600   else
2601     return INSN_RFS_DEBUG_ORIG_ORDER (tmp2) - INSN_RFS_DEBUG_ORIG_ORDER (tmp);
2602 }
2603 
2604 /* Returns a positive value if x is preferred; returns a negative value if
2605    y is preferred.  Should never return 0, since that will make the sort
2606    unstable.  */
2607 
2608 static int
2609 rank_for_schedule (const void *x, const void *y)
2610 {
2611   rtx_insn *tmp = *(rtx_insn * const *) y;
2612   rtx_insn *tmp2 = *(rtx_insn * const *) x;
2613   int tmp_class, tmp2_class;
2614   int val, priority_val, info_val, diff;
2615 
2616   if (live_range_shrinkage_p)
2617     {
2618       /* Don't use SCHED_PRESSURE_MODEL -- it results in much worse
2619 	 code.  */
2620       gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
2621       if ((INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp) < 0
2622 	   || INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2) < 0)
2623 	  && (diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
2624 		      - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2))) != 0)
2625 	return rfs_result (RFS_LIVE_RANGE_SHRINK1, diff, tmp, tmp2);
2626       /* Sort by INSN_LUID (original insn order), so that we make the
2627 	 sort stable.  This minimizes instruction movement, thus
2628 	 minimizing sched's effect on debugging and cross-jumping.  */
2629       return rfs_result (RFS_LIVE_RANGE_SHRINK2,
2630 			 INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2631     }
2632 
2633   /* The insn in a schedule group should be issued first.  */
2634   if (flag_sched_group_heuristic &&
2635       SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
2636     return rfs_result (RFS_SCHED_GROUP, SCHED_GROUP_P (tmp2) ? 1 : -1,
2637 		       tmp, tmp2);
2638 
2639   /* Make sure that priority of TMP and TMP2 are initialized.  */
2640   gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
2641 
2642   if (sched_fusion)
2643     {
2644       /* The instruction with the same fusion priority as the last
2645 	 scheduled instruction is the one we prefer to pick next.  If that
2646 	 is not the case, we sort the ready list first by fusion priority,
2647 	 then by priority, and finally by INSN_LUID.  */
2648       int a = INSN_FUSION_PRIORITY (tmp);
2649       int b = INSN_FUSION_PRIORITY (tmp2);
2650       int last = -1;
2651 
2652       if (last_nondebug_scheduled_insn
2653 	  && !NOTE_P (last_nondebug_scheduled_insn)
2654 	  && BLOCK_FOR_INSN (tmp)
2655 	       == BLOCK_FOR_INSN (last_nondebug_scheduled_insn))
2656 	last = INSN_FUSION_PRIORITY (last_nondebug_scheduled_insn);
2657 
2658       if (a != last && b != last)
2659 	{
2660 	  if (a == b)
2661 	    {
2662 	      a = INSN_PRIORITY (tmp);
2663 	      b = INSN_PRIORITY (tmp2);
2664 	    }
2665 	  if (a != b)
2666 	    return rfs_result (RFS_FUSION, b - a, tmp, tmp2);
2667 	  else
2668 	    return rfs_result (RFS_FUSION,
2669 			       INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2670 	}
2671       else if (a == b)
2672 	{
2673 	  gcc_assert (last_nondebug_scheduled_insn
2674 		      && !NOTE_P (last_nondebug_scheduled_insn));
2675 	  last = INSN_PRIORITY (last_nondebug_scheduled_insn);
2676 
2677 	  a = abs (INSN_PRIORITY (tmp) - last);
2678 	  b = abs (INSN_PRIORITY (tmp2) - last);
2679 	  if (a != b)
2680 	    return rfs_result (RFS_FUSION, a - b, tmp, tmp2);
2681 	  else
2682 	    return rfs_result (RFS_FUSION,
2683 			       INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2684 	}
2685       else if (a == last)
2686 	return rfs_result (RFS_FUSION, -1, tmp, tmp2);
2687       else
2688 	return rfs_result (RFS_FUSION, 1, tmp, tmp2);
2689     }
2690 
2691   if (sched_pressure != SCHED_PRESSURE_NONE)
2692     {
2693       /* Prefer insn whose scheduling results in the smallest register
2694 	 pressure excess.  */
2695       if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
2696 		   + insn_delay (tmp)
2697 		   - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
2698 		   - insn_delay (tmp2))))
2699 	return rfs_result (RFS_PRESSURE_DELAY, diff, tmp, tmp2);
2700     }
2701 
2702   if (sched_pressure != SCHED_PRESSURE_NONE
2703       && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var)
2704       && INSN_TICK (tmp2) != INSN_TICK (tmp))
2705     {
2706       diff = INSN_TICK (tmp) - INSN_TICK (tmp2);
2707       return rfs_result (RFS_PRESSURE_TICK, diff, tmp, tmp2);
2708     }
2709 
2710   /* If we are doing backtracking in this schedule, prefer insns that
2711      have forward dependencies with negative cost against an insn that
2712      was already scheduled.  */
2713   if (current_sched_info->flags & DO_BACKTRACKING)
2714     {
2715       priority_val = FEEDS_BACKTRACK_INSN (tmp2) - FEEDS_BACKTRACK_INSN (tmp);
2716       if (priority_val)
2717 	return rfs_result (RFS_FEEDS_BACKTRACK_INSN, priority_val, tmp, tmp2);
2718     }
2719 
2720   /* Prefer insn with higher priority.  */
2721   priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
2722 
2723   if (flag_sched_critical_path_heuristic && priority_val)
2724     return rfs_result (RFS_PRIORITY, priority_val, tmp, tmp2);
2725 
2726   if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) >= 0)
2727     {
2728       int autopref = autopref_rank_for_schedule (tmp, tmp2);
2729       if (autopref != 0)
2730 	return autopref;
2731     }
2732 
2733   /* Prefer speculative insn with greater dependencies weakness.  */
2734   if (flag_sched_spec_insn_heuristic && spec_info)
2735     {
2736       ds_t ds1, ds2;
2737       dw_t dw1, dw2;
2738       int dw;
2739 
2740       ds1 = TODO_SPEC (tmp) & SPECULATIVE;
2741       if (ds1)
2742 	dw1 = ds_weak (ds1);
2743       else
2744 	dw1 = NO_DEP_WEAK;
2745 
2746       ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
2747       if (ds2)
2748 	dw2 = ds_weak (ds2);
2749       else
2750 	dw2 = NO_DEP_WEAK;
2751 
2752       dw = dw2 - dw1;
2753       if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
2754 	return rfs_result (RFS_SPECULATION, dw, tmp, tmp2);
2755     }
2756 
2757   info_val = (*current_sched_info->rank) (tmp, tmp2);
2758   if (flag_sched_rank_heuristic && info_val)
2759     return rfs_result (RFS_SCHED_RANK, info_val, tmp, tmp2);
2760 
2761   /* Compare insns based on their relation to the last scheduled
2762      non-debug insn.  */
2763   if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
2764     {
2765       dep_t dep1;
2766       dep_t dep2;
2767       rtx_insn *last = last_nondebug_scheduled_insn;
2768 
2769       /* Classify the instructions into three classes:
2770          1) Data dependent on the last scheduled insn.
2771          2) Anti/Output dependent on the last scheduled insn.
2772          3) Independent of the last scheduled insn, or has a latency of one.
2773          Choose the insn from the highest numbered class if different.  */
2774       dep1 = sd_find_dep_between (last, tmp, true);
2775 
2776       if (dep1 == NULL || dep_cost (dep1) == 1)
2777 	tmp_class = 3;
2778       else if (/* Data dependence.  */
2779 	       DEP_TYPE (dep1) == REG_DEP_TRUE)
2780 	tmp_class = 1;
2781       else
2782 	tmp_class = 2;
2783 
2784       dep2 = sd_find_dep_between (last, tmp2, true);
2785 
2786       if (dep2 == NULL || dep_cost (dep2)  == 1)
2787 	tmp2_class = 3;
2788       else if (/* Data dependence.  */
2789 	       DEP_TYPE (dep2) == REG_DEP_TRUE)
2790 	tmp2_class = 1;
2791       else
2792 	tmp2_class = 2;
2793 
2794       if ((val = tmp2_class - tmp_class))
2795 	return rfs_result (RFS_LAST_INSN, val, tmp, tmp2);
2796     }
2797 
2798   /* Prefer instructions that occur earlier in the model schedule.  */
2799   if (sched_pressure == SCHED_PRESSURE_MODEL
2800       && INSN_BB (tmp) == target_bb && INSN_BB (tmp2) == target_bb)
2801     {
2802       diff = model_index (tmp) - model_index (tmp2);
2803       gcc_assert (diff != 0);
2804       return rfs_result (RFS_PRESSURE_INDEX, diff, tmp, tmp2);
2805     }
2806 
2807   /* Prefer the insn which has more later insns that depend on it.
2808      This gives the scheduler more freedom when scheduling later
2809      instructions at the expense of added register pressure.  */
2810 
2811   val = (dep_list_size (tmp2, SD_LIST_FORW)
2812 	 - dep_list_size (tmp, SD_LIST_FORW));
2813 
2814   if (flag_sched_dep_count_heuristic && val != 0)
2815     return rfs_result (RFS_DEP_COUNT, val, tmp, tmp2);
2816 
2817   /* If insns are equally good, sort by INSN_LUID (original insn order),
2818      so that we make the sort stable.  This minimizes instruction movement,
2819      thus minimizing sched's effect on debugging and cross-jumping.  */
2820   return rfs_result (RFS_TIE, INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2821 }
2822 
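/* Illustrative sketch: rank_for_schedule is written for qsort with X
   and Y swapped on entry, so "preferred" insns compare greater and
   sort towards the back of the vector -- which is where
   ready_remove_first takes them from.  A toy comparator with the same
   convention, over plain ints where larger means better:  */

static int
sketch_rank (const void *x, const void *y)
{
  int vx = *(const int *) x;
  int vy = *(const int *) y;

  return vx - vy;	/* The best element ends up last.  */
}
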
2823 /* Re-sort the array A of length N; only A[N - 1] may be out of order.  */
2824 
2825 HAIFA_INLINE static void
2826 swap_sort (rtx_insn **a, int n)
2827 {
2828   rtx_insn *insn = a[n - 1];
2829   int i = n - 2;
2830 
2831   while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
2832     {
2833       a[i + 1] = a[i];
2834       i -= 1;
2835     }
2836   a[i + 1] = insn;
2837 }
2838 
2839 /* Add INSN to the insn queue so that it can be executed at least
2840    N_CYCLES after the currently executing insn.  Preserve insns
2841    chain for debugging purposes.  REASON will be printed in debugging
2842    output.  */
2843 
2844 HAIFA_INLINE static void
2845 queue_insn (rtx_insn *insn, int n_cycles, const char *reason)
2846 {
2847   int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
2848   rtx_insn_list *link = alloc_INSN_LIST (insn, insn_queue[next_q]);
2849   int new_tick;
2850 
2851   gcc_assert (n_cycles <= max_insn_queue_index);
2852   gcc_assert (!DEBUG_INSN_P (insn));
2853 
2854   insn_queue[next_q] = link;
2855   q_size += 1;
2856 
2857   if (sched_verbose >= 2)
2858     {
2859       fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
2860 	       (*current_sched_info->print_insn) (insn, 0));
2861 
2862       fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
2863     }
2864 
2865   QUEUE_INDEX (insn) = next_q;
2866 
2867   if (current_sched_info->flags & DO_BACKTRACKING)
2868     {
2869       new_tick = clock_var + n_cycles;
2870       if (INSN_TICK (insn) == INVALID_TICK || INSN_TICK (insn) < new_tick)
2871 	INSN_TICK (insn) = new_tick;
2872 
2873       if (INSN_EXACT_TICK (insn) != INVALID_TICK
2874 	  && INSN_EXACT_TICK (insn) < clock_var + n_cycles)
2875 	{
2876 	  must_backtrack = true;
2877 	  if (sched_verbose >= 2)
2878 	    fprintf (sched_dump, ";;\t\tcausing a backtrack.\n");
2879 	}
2880     }
2881 }
2882 
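/* Illustrative sketch (assuming, as the NEXT_Q_AFTER arithmetic
   suggests, a power-of-two circular buffer of queue slots): an insn
   stalled for N_CYCLES goes into the slot N_CYCLES ahead of the
   current head, and the head advances one slot per clock tick,
   dumping each bucket into the ready list when its stall elapses.  */

#define SKETCH_QUEUE_SLOTS 8	/* Hypothetical; must be a power of two.  */

static int
sketch_queue_slot (int head, int n_cycles)
{
  return (head + n_cycles) & (SKETCH_QUEUE_SLOTS - 1);
}
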
2883 /* Remove INSN from queue.  */
2884 static void
2885 queue_remove (rtx_insn *insn)
2886 {
2887   gcc_assert (QUEUE_INDEX (insn) >= 0);
2888   remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
2889   q_size--;
2890   QUEUE_INDEX (insn) = QUEUE_NOWHERE;
2891 }
2892 
2893 /* Return a pointer to the bottom of the ready list, i.e. the insn
2894    with the lowest priority.  */
2895 
2896 rtx_insn **
2897 ready_lastpos (struct ready_list *ready)
2898 {
2899   gcc_assert (ready->n_ready >= 1);
2900   return ready->vec + ready->first - ready->n_ready + 1;
2901 }
2902 
2903 /* Add an element INSN to the ready list so that it ends up with the
2904    lowest/highest priority depending on FIRST_P.  */
2905 
2906 HAIFA_INLINE static void
2907 ready_add (struct ready_list *ready, rtx_insn *insn, bool first_p)
2908 {
2909   if (!first_p)
2910     {
2911       if (ready->first == ready->n_ready)
2912 	{
2913 	  memmove (ready->vec + ready->veclen - ready->n_ready,
2914 		   ready_lastpos (ready),
2915 		   ready->n_ready * sizeof (rtx));
2916 	  ready->first = ready->veclen - 1;
2917 	}
2918       ready->vec[ready->first - ready->n_ready] = insn;
2919     }
2920   else
2921     {
2922       if (ready->first == ready->veclen - 1)
2923 	{
2924 	  if (ready->n_ready)
2925 	    /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
2926 	    memmove (ready->vec + ready->veclen - ready->n_ready - 1,
2927 		     ready_lastpos (ready),
2928 		     ready->n_ready * sizeof (rtx));
2929 	  ready->first = ready->veclen - 2;
2930 	}
2931       ready->vec[++(ready->first)] = insn;
2932     }
2933 
2934   ready->n_ready++;
2935   if (DEBUG_INSN_P (insn))
2936     ready->n_debug++;
2937 
2938   gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
2939   QUEUE_INDEX (insn) = QUEUE_READY;
2940 
2941   if (INSN_EXACT_TICK (insn) != INVALID_TICK
2942       && INSN_EXACT_TICK (insn) < clock_var)
2943     {
2944       must_backtrack = true;
2945     }
2946 }
2947 
2948 /* Remove the element with the highest priority from the ready list and
2949    return it.  */
2950 
2951 HAIFA_INLINE static rtx_insn *
2952 ready_remove_first (struct ready_list *ready)
2953 {
2954   rtx_insn *t;
2955 
2956   gcc_assert (ready->n_ready);
2957   t = ready->vec[ready->first--];
2958   ready->n_ready--;
2959   if (DEBUG_INSN_P (t))
2960     ready->n_debug--;
2961   /* If the ready list becomes empty, reset it.  */
2962   if (ready->n_ready == 0)
2963     ready->first = ready->veclen - 1;
2964 
2965   gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
2966   QUEUE_INDEX (t) = QUEUE_NOWHERE;
2967 
2968   return t;
2969 }
2970 
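/* Illustrative sketch of the ready-vector layout shared by
   ready_lastpos, ready_add, ready_remove_first and ready_element: the
   N_READY occupied slots end at index FIRST, with the best insn at
   vec[FIRST] and the worst at vec[FIRST - N_READY + 1].  */

struct sketch_ready { int *vec; int first; int n_ready; };

/* Bottom of the list (lowest priority), as in ready_lastpos.  */
static int *
sketch_lastpos (struct sketch_ready *r)
{
  return r->vec + r->first - r->n_ready + 1;
}

/* Element I, where I == 0 is the highest priority, as in
   ready_element.  */
static int
sketch_element (struct sketch_ready *r, int i)
{
  return r->vec[r->first - i];
}
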
2971 /* The following code implements multi-pass scheduling for the first
2972    cycle.  In other words, we try to choose the ready insn that
2973    permits starting the maximum number of insns on the same cycle.  */
2974 
2975 /* Return a pointer to the element at INDEX in the ready list.  The
2976    insn with the highest priority has INDEX 0, and the insn with the
2977    lowest priority has INDEX N_READY - 1.  */
2978 
2979 rtx_insn *
2980 ready_element (struct ready_list *ready, int index)
2981 {
2982   gcc_assert (ready->n_ready && index < ready->n_ready);
2983 
2984   return ready->vec[ready->first - index];
2985 }
2986 
2987 /* Remove the element at INDEX from the ready list and return it.
2988    The insn with the highest priority has INDEX 0, and the insn with
2989    the lowest priority has INDEX N_READY - 1.  */
2990 
2991 HAIFA_INLINE static rtx_insn *
2992 ready_remove (struct ready_list *ready, int index)
2993 {
2994   rtx_insn *t;
2995   int i;
2996 
2997   if (index == 0)
2998     return ready_remove_first (ready);
2999   gcc_assert (ready->n_ready && index < ready->n_ready);
3000   t = ready->vec[ready->first - index];
3001   ready->n_ready--;
3002   if (DEBUG_INSN_P (t))
3003     ready->n_debug--;
3004   for (i = index; i < ready->n_ready; i++)
3005     ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
3006   QUEUE_INDEX (t) = QUEUE_NOWHERE;
3007   return t;
3008 }
3009 
3010 /* Remove INSN from the ready list.  */
3011 static void
3012 ready_remove_insn (rtx_insn *insn)
3013 {
3014   int i;
3015 
3016   for (i = 0; i < readyp->n_ready; i++)
3017     if (ready_element (readyp, i) == insn)
3018       {
3019         ready_remove (readyp, i);
3020         return;
3021       }
3022   gcc_unreachable ();
3023 }
3024 
3025 /* Calculate the difference between two statistics sets WAS and NOW.
3026    The result is returned in WAS.  */
3027 static void
3028 rank_for_schedule_stats_diff (rank_for_schedule_stats_t *was,
3029 			      const rank_for_schedule_stats_t *now)
3030 {
3031   for (int i = 0; i < RFS_N; ++i)
3032     was->stats[i] = now->stats[i] - was->stats[i];
3033 }
3034 
3035 /* Print rank_for_schedule statistics.  */
3036 static void
3037 print_rank_for_schedule_stats (const char *prefix,
3038 			       const rank_for_schedule_stats_t *stats,
3039 			       struct ready_list *ready)
3040 {
3041   for (int i = 0; i < RFS_N; ++i)
3042     if (stats->stats[i])
3043       {
3044 	fprintf (sched_dump, "%s%20s: %u", prefix, rfs_str[i], stats->stats[i]);
3045 
3046 	if (ready != NULL)
3047 	  /* Print out insns that won due to RFS_<I>.  */
3048 	  {
3049 	    rtx_insn **p = ready_lastpos (ready);
3050 
3051 	    fprintf (sched_dump, ":");
3052 	    /* Start with 1 since least-priority insn didn't have any wins.  */
3053 	    for (int j = 1; j < ready->n_ready; ++j)
3054 	      if (INSN_LAST_RFS_WIN (p[j]) == i)
3055 		fprintf (sched_dump, " %s",
3056 			 (*current_sched_info->print_insn) (p[j], 0));
3057 	  }
3058 	fprintf (sched_dump, "\n");
3059       }
3060 }
3061 
3062 /* Separate DEBUG_INSNs from normal insns.  DEBUG_INSNs go to the end
3063    of the array.  */
3064 static void
3065 ready_sort_debug (struct ready_list *ready)
3066 {
3067   int i;
3068   rtx_insn **first = ready_lastpos (ready);
3069 
3070   for (i = 0; i < ready->n_ready; ++i)
3071     if (!DEBUG_INSN_P (first[i]))
3072       INSN_RFS_DEBUG_ORIG_ORDER (first[i]) = i;
3073 
3074   qsort (first, ready->n_ready, sizeof (rtx), rank_for_schedule_debug);
3075 }
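
/* The INSN_RFS_DEBUG_ORIG_ORDER values recorded above give the comparator
   a stable key: qsort itself is not stable, so rank_for_schedule_debug can
   use the saved positions to keep the non-debug insns in their original
   relative order while the debug insns migrate to the end.  */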
3076 
3077 /* Sort non-debug insns in the ready list READY by ascending priority.
3078    Assumes that all debug insns are separated from the real insns.  */
3079 static void
3080 ready_sort_real (struct ready_list *ready)
3081 {
3082   int i;
3083   rtx_insn **first = ready_lastpos (ready);
3084   int n_ready_real = ready->n_ready - ready->n_debug;
3085 
3086   if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
3087     for (i = 0; i < n_ready_real; ++i)
3088       setup_insn_reg_pressure_info (first[i]);
3089   else if (sched_pressure == SCHED_PRESSURE_MODEL
3090 	   && model_curr_point < model_num_insns)
3091     model_set_excess_costs (first, n_ready_real);
3092 
3093   rank_for_schedule_stats_t stats1;
3094   if (sched_verbose >= 4)
3095     stats1 = rank_for_schedule_stats;
3096 
3097   if (n_ready_real == 2)
3098     swap_sort (first, n_ready_real);
3099   else if (n_ready_real > 2)
3100     qsort (first, n_ready_real, sizeof (rtx), rank_for_schedule);
3101 
3102   if (sched_verbose >= 4)
3103     {
3104       rank_for_schedule_stats_diff (&stats1, &rank_for_schedule_stats);
3105       print_rank_for_schedule_stats (";;\t\t", &stats1, ready);
3106     }
3107 }
3108 
3109 /* Sort the ready list READY by ascending priority.  */
3110 static void
3111 ready_sort (struct ready_list *ready)
3112 {
3113   if (ready->n_debug > 0)
3114     ready_sort_debug (ready);
3115   else
3116     ready_sort_real (ready);
3117 }
3118 
3119 /* PREV is an insn that is ready to execute.  Adjust its priority if that
3120    will help shorten or lengthen register lifetimes as appropriate.  Also
3121    provide a hook for the target to tweak the priority itself.  */
3122 
3123 HAIFA_INLINE static void
3124 adjust_priority (rtx_insn *prev)
3125 {
3126   /* ??? There used to be code here to try and estimate how an insn
3127      affected register lifetimes, but it did it by looking at REG_DEAD
3128      notes, which we removed in schedule_region.  Nor did it try to
3129      take into account register pressure or anything useful like that.
3130 
3131      Revisit when we have a machine model to work with and not before.  */
3132 
3133   if (targetm.sched.adjust_priority)
3134     INSN_PRIORITY (prev) =
3135       targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
3136 }
3137 
3138 /* Advance DFA state STATE by one cycle.  */
3139 void
3140 advance_state (state_t state)
3141 {
3142   if (targetm.sched.dfa_pre_advance_cycle)
3143     targetm.sched.dfa_pre_advance_cycle ();
3144 
3145   if (targetm.sched.dfa_pre_cycle_insn)
3146     state_transition (state,
3147 		      targetm.sched.dfa_pre_cycle_insn ());
3148 
3149   state_transition (state, NULL);
3150 
3151   if (targetm.sched.dfa_post_cycle_insn)
3152     state_transition (state,
3153 		      targetm.sched.dfa_post_cycle_insn ());
3154 
3155   if (targetm.sched.dfa_post_advance_cycle)
3156     targetm.sched.dfa_post_advance_cycle ();
3157 }
3158 
3159 /* Advance time by one cycle.  */
3160 HAIFA_INLINE static void
3161 advance_one_cycle (void)
3162 {
3163   advance_state (curr_state);
3164   if (sched_verbose >= 4)
3165     fprintf (sched_dump, ";;\tAdvance the current state.\n");
3166 }
3167 
3168 /* Update register pressure after scheduling INSN.  */
3169 static void
3170 update_register_pressure (rtx_insn *insn)
3171 {
3172   struct reg_use_data *use;
3173   struct reg_set_data *set;
3174 
3175   gcc_checking_assert (!DEBUG_INSN_P (insn));
3176 
3177   for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
3178     if (dying_use_p (use))
3179       mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
3180 				 use->regno, false);
3181   for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
3182     mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
3183 			       set->regno, true);
3184 }
3185 
3186 /* Set up or update (if UPDATE_P) max register pressure (see its
3187    meaning in sched-int.h::_haifa_insn_data) for all current BB insns
3188    after insn AFTER.  */
3189 static void
3190 setup_insn_max_reg_pressure (rtx_insn *after, bool update_p)
3191 {
3192   int i, p;
3193   bool eq_p;
3194   rtx_insn *insn;
3195   static int max_reg_pressure[N_REG_CLASSES];
3196 
3197   save_reg_pressure ();
3198   for (i = 0; i < ira_pressure_classes_num; i++)
3199     max_reg_pressure[ira_pressure_classes[i]]
3200       = curr_reg_pressure[ira_pressure_classes[i]];
3201   for (insn = NEXT_INSN (after);
3202        insn != NULL_RTX && ! BARRIER_P (insn)
3203 	 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
3204        insn = NEXT_INSN (insn))
3205     if (NONDEBUG_INSN_P (insn))
3206       {
3207 	eq_p = true;
3208 	for (i = 0; i < ira_pressure_classes_num; i++)
3209 	  {
3210 	    p = max_reg_pressure[ira_pressure_classes[i]];
3211 	    if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
3212 	      {
3213 		eq_p = false;
3214 		INSN_MAX_REG_PRESSURE (insn)[i]
3215 		  = max_reg_pressure[ira_pressure_classes[i]];
3216 	      }
3217 	  }
3218 	if (update_p && eq_p)
3219 	  break;
3220 	update_register_pressure (insn);
3221 	for (i = 0; i < ira_pressure_classes_num; i++)
3222 	  if (max_reg_pressure[ira_pressure_classes[i]]
3223 	      < curr_reg_pressure[ira_pressure_classes[i]])
3224 	    max_reg_pressure[ira_pressure_classes[i]]
3225 	      = curr_reg_pressure[ira_pressure_classes[i]];
3226       }
3227   restore_reg_pressure ();
3228 }
3229 
3230 /* Update the current register pressure after scheduling INSN.  Also
3231    update the max register pressure for the unscheduled insns of the
3232    current BB.  */
3233 static void
3234 update_reg_and_insn_max_reg_pressure (rtx_insn *insn)
3235 {
3236   int i;
3237   int before[N_REG_CLASSES];
3238 
3239   for (i = 0; i < ira_pressure_classes_num; i++)
3240     before[i] = curr_reg_pressure[ira_pressure_classes[i]];
3241   update_register_pressure (insn);
3242   for (i = 0; i < ira_pressure_classes_num; i++)
3243     if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
3244       break;
3245   if (i < ira_pressure_classes_num)
3246     setup_insn_max_reg_pressure (insn, true);
3247 }
3248 
3249 /* Set up register pressure at the beginning of basic block BB, whose
3250    insns start after insn AFTER.  Also set up the max register pressure
3251    for all insns of the basic block.  */
3252 void
3253 sched_setup_bb_reg_pressure_info (basic_block bb, rtx_insn *after)
3254 {
3255   gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
3256   initiate_bb_reg_pressure_info (bb);
3257   setup_insn_max_reg_pressure (after, false);
3258 }
3259 
3260 /* If doing predication while scheduling, verify whether INSN, which
3261    has just been scheduled, clobbers the conditions of any
3262    instructions that must be predicated in order to break their
3263    dependencies.  If so, remove them from the queues so that they will
3264    only be scheduled once their control dependency is resolved.  */
3265 
3266 static void
3267 check_clobbered_conditions (rtx_insn *insn)
3268 {
3269   HARD_REG_SET t;
3270   int i;
3271 
3272   if ((current_sched_info->flags & DO_PREDICATION) == 0)
3273     return;
3274 
3275   find_all_hard_reg_sets (insn, &t, true);
3276 
3277  restart:
3278   for (i = 0; i < ready.n_ready; i++)
3279     {
3280       rtx_insn *x = ready_element (&ready, i);
3281       if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
3282 	{
3283 	  ready_remove_insn (x);
3284 	  goto restart;
3285 	}
3286     }
3287   for (i = 0; i <= max_insn_queue_index; i++)
3288     {
3289       rtx_insn_list *link;
3290       int q = NEXT_Q_AFTER (q_ptr, i);
3291 
3292     restart_queue:
3293       for (link = insn_queue[q]; link; link = link->next ())
3294 	{
3295 	  rtx_insn *x = link->insn ();
3296 	  if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
3297 	    {
3298 	      queue_remove (x);
3299 	      goto restart_queue;
3300 	    }
3301 	}
3302     }
3303 }
3304 
3305 /* Return (in order):
3306 
3307    - positive if INSN adversely affects the pressure on one
3308      register class
3309 
3310    - negative if INSN reduces the pressure on one register class
3311 
3312    - 0 if INSN doesn't affect the pressure on any register class.  */
3313 
3314 static int
3315 model_classify_pressure (struct model_insn_info *insn)
3316 {
3317   struct reg_pressure_data *reg_pressure;
3318   int death[N_REG_CLASSES];
3319   int pci, cl, sum;
3320 
3321   calculate_reg_deaths (insn->insn, death);
3322   reg_pressure = INSN_REG_PRESSURE (insn->insn);
3323   sum = 0;
3324   for (pci = 0; pci < ira_pressure_classes_num; pci++)
3325     {
3326       cl = ira_pressure_classes[pci];
3327       if (death[cl] < reg_pressure[pci].set_increase)
3328 	return 1;
3329       sum += reg_pressure[pci].set_increase - death[cl];
3330     }
3331   return sum;
3332 }
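
/* A worked example with illustrative numbers: if INSN creates one new
   register in some class (set_increase == 1) while two registers of that
   class die in it (death == 2), the early return is skipped (2 < 1 is
   false) and the sum receives 1 - 2 == -1, classifying INSN as reducing
   pressure.  Conversely, set_increase == 2 with death == 1 triggers the
   early return of 1, since at least one class gets worse.  */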
3333 
3334 /* Return true if INSN1 should come before INSN2 in the model schedule.  */
3335 
3336 static int
3337 model_order_p (struct model_insn_info *insn1, struct model_insn_info *insn2)
3338 {
3339   unsigned int height1, height2;
3340   unsigned int priority1, priority2;
3341 
3342   /* Prefer instructions with a higher model priority.  */
3343   if (insn1->model_priority != insn2->model_priority)
3344     return insn1->model_priority > insn2->model_priority;
3345 
3346   /* Combine the length of the longest path of satisfied true dependencies
3347      that leads to each instruction (depth) with the length of the longest
3348      path of any dependencies that leads from the instruction (alap).
3349      Prefer instructions with the greatest combined length.  If the combined
3350      lengths are equal, prefer instructions with the greatest depth.
3351 
3352      The idea is that, if we have a set S of "equal" instructions that each
3353      have ALAP value X, and we pick one such instruction I, any true-dependent
3354      successors of I that have ALAP value X - 1 should be preferred over S.
3355      This encourages the schedule to be "narrow" rather than "wide".
3356      However, if I is a low-priority instruction that we decided to
3357      schedule because of its model_classify_pressure, and if there
3358      is a set of higher-priority instructions T, the aforementioned
3359      successors of I should not have the edge over T.  */
3360   height1 = insn1->depth + insn1->alap;
3361   height2 = insn2->depth + insn2->alap;
3362   if (height1 != height2)
3363     return height1 > height2;
3364   if (insn1->depth != insn2->depth)
3365     return insn1->depth > insn2->depth;
3366 
3367   /* We have no real preference between INSN1 and INSN2 as far as attempts
3368      to reduce pressure go.  Prefer instructions with higher priorities.  */
3369   priority1 = INSN_PRIORITY (insn1->insn);
3370   priority2 = INSN_PRIORITY (insn2->insn);
3371   if (priority1 != priority2)
3372     return priority1 > priority2;
3373 
3374   /* Use the original rtl sequence as a tie-breaker.  */
3375   return insn1 < insn2;
3376 }
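
/* A small illustrative case of the height heuristic: with INSN1 at
   depth 2 and alap 3, and INSN2 at depth 4 and alap 1, both have height
   2 + 3 == 4 + 1 == 5, so the depth comparison breaks the tie in favor
   of INSN2, the one deeper along satisfied true dependencies.  */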
3377 
3378 /* Add INSN to the model worklist immediately after PREV.  Add it to the
3379    beginning of the list if PREV is null.  */
3380 
3381 static void
3382 model_add_to_worklist_at (struct model_insn_info *insn,
3383 			  struct model_insn_info *prev)
3384 {
3385   gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_NOWHERE);
3386   QUEUE_INDEX (insn->insn) = QUEUE_READY;
3387 
3388   insn->prev = prev;
3389   if (prev)
3390     {
3391       insn->next = prev->next;
3392       prev->next = insn;
3393     }
3394   else
3395     {
3396       insn->next = model_worklist;
3397       model_worklist = insn;
3398     }
3399   if (insn->next)
3400     insn->next->prev = insn;
3401 }
3402 
3403 /* Remove INSN from the model worklist.  */
3404 
3405 static void
3406 model_remove_from_worklist (struct model_insn_info *insn)
3407 {
3408   gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_READY);
3409   QUEUE_INDEX (insn->insn) = QUEUE_NOWHERE;
3410 
3411   if (insn->prev)
3412     insn->prev->next = insn->next;
3413   else
3414     model_worklist = insn->next;
3415   if (insn->next)
3416     insn->next->prev = insn->prev;
3417 }
3418 
3419 /* Add INSN to the model worklist.  Start looking for a suitable position
3420    between neighbors PREV and NEXT, testing at most MAX_SCHED_READY_INSNS
3421    insns either side.  A null PREV indicates the beginning of the list and
3422    a null NEXT indicates the end.  */
3423 
3424 static void
3425 model_add_to_worklist (struct model_insn_info *insn,
3426 		       struct model_insn_info *prev,
3427 		       struct model_insn_info *next)
3428 {
3429   int count;
3430 
3431   count = MAX_SCHED_READY_INSNS;
3432   if (count > 0 && prev && model_order_p (insn, prev))
3433     do
3434       {
3435 	count--;
3436 	prev = prev->prev;
3437       }
3438     while (count > 0 && prev && model_order_p (insn, prev));
3439   else
3440     while (count > 0 && next && model_order_p (next, insn))
3441       {
3442 	count--;
3443 	prev = next;
3444 	next = next->next;
3445       }
3446   model_add_to_worklist_at (insn, prev);
3447 }
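
/* A self-contained sketch of the bounded two-direction scan above, on a
   toy doubly-linked list ordered by a plain integer key (larger key ==
   higher priority).  All toy_* names are hypothetical; the real list is
   ordered by model_order_p and capped by MAX_SCHED_READY_INSNS.  */
#if 0
#include <stddef.h>

struct toy_node { struct toy_node *prev, *next; int key; };

/* Insert NODE near the hint position (PREV, NEXT), scanning at most
   LIMIT nodes in whichever direction restores the ordering, then
   splicing NODE in after PREV (or at *HEAD if PREV is null).  */
static void
toy_add_bounded (struct toy_node **head, struct toy_node *node,
		 struct toy_node *prev, struct toy_node *next, int limit)
{
  if (limit > 0 && prev && node->key > prev->key)
    do
      {
	limit--;
	prev = prev->prev;
      }
    while (limit > 0 && prev && node->key > prev->key);
  else
    while (limit > 0 && next && next->key > node->key)
      {
	limit--;
	prev = next;
	next = next->next;
      }

  node->prev = prev;
  if (prev)
    {
      node->next = prev->next;
      prev->next = node;
    }
  else
    {
      node->next = *head;
      *head = node;
    }
  if (node->next)
    node->next->prev = node;
}
#endif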
3448 
3449 /* INSN may now have a higher priority (in the model_order_p sense)
3450    than before.  Move it up the worklist if necessary.  */
3451 
3452 static void
3453 model_promote_insn (struct model_insn_info *insn)
3454 {
3455   struct model_insn_info *prev;
3456   int count;
3457 
3458   prev = insn->prev;
3459   count = MAX_SCHED_READY_INSNS;
3460   while (count > 0 && prev && model_order_p (insn, prev))
3461     {
3462       count--;
3463       prev = prev->prev;
3464     }
3465   if (prev != insn->prev)
3466     {
3467       model_remove_from_worklist (insn);
3468       model_add_to_worklist_at (insn, prev);
3469     }
3470 }
3471 
3472 /* Add INSN to the end of the model schedule.  */
3473 
3474 static void
3475 model_add_to_schedule (rtx_insn *insn)
3476 {
3477   unsigned int point;
3478 
3479   gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
3480   QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
3481 
3482   point = model_schedule.length ();
3483   model_schedule.quick_push (insn);
3484   INSN_MODEL_INDEX (insn) = point + 1;
3485 }
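
/* Note the bias: INSN_MODEL_INDEX stores POINT + 1 so that zero can act
   as a "not in the model schedule" marker; consumers such as model_index
   are therefore expected to subtract the bias back out.  */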
3486 
3487 /* Analyze the instructions that are to be scheduled, setting up
3488    MODEL_INSN_INFO (...) and model_num_insns accordingly.  Add ready
3489    instructions to model_worklist.  */
3490 
3491 static void
3492 model_analyze_insns (void)
3493 {
3494   rtx_insn *start, *end, *iter;
3495   sd_iterator_def sd_it;
3496   dep_t dep;
3497   struct model_insn_info *insn, *con;
3498 
3499   model_num_insns = 0;
3500   start = PREV_INSN (current_sched_info->next_tail);
3501   end = current_sched_info->prev_head;
3502   for (iter = start; iter != end; iter = PREV_INSN (iter))
3503     if (NONDEBUG_INSN_P (iter))
3504       {
3505 	insn = MODEL_INSN_INFO (iter);
3506 	insn->insn = iter;
3507 	FOR_EACH_DEP (iter, SD_LIST_FORW, sd_it, dep)
3508 	  {
3509 	    con = MODEL_INSN_INFO (DEP_CON (dep));
3510 	    if (con->insn && insn->alap < con->alap + 1)
3511 	      insn->alap = con->alap + 1;
3512 	  }
3513 
3514 	insn->old_queue = QUEUE_INDEX (iter);
3515 	QUEUE_INDEX (iter) = QUEUE_NOWHERE;
3516 
3517 	insn->unscheduled_preds = dep_list_size (iter, SD_LIST_HARD_BACK);
3518 	if (insn->unscheduled_preds == 0)
3519 	  model_add_to_worklist (insn, NULL, model_worklist);
3520 
3521 	model_num_insns++;
3522       }
3523 }
3524 
3525 /* The global state describes the register pressure at the start of the
3526    model schedule.  Initialize GROUP accordingly.  */
3527 
3528 static void
3529 model_init_pressure_group (struct model_pressure_group *group)
3530 {
3531   int pci, cl;
3532 
3533   for (pci = 0; pci < ira_pressure_classes_num; pci++)
3534     {
3535       cl = ira_pressure_classes[pci];
3536       group->limits[pci].pressure = curr_reg_pressure[cl];
3537       group->limits[pci].point = 0;
3538     }
3539   /* Use index model_num_insns to record the state after the last
3540      instruction in the model schedule.  */
3541   group->model = XNEWVEC (struct model_pressure_data,
3542 			  (model_num_insns + 1) * ira_pressure_classes_num);
3543 }
3544 
3545 /* Record that MODEL_REF_PRESSURE (GROUP, POINT, PCI) is PRESSURE.
3546    Update the maximum pressure for the whole schedule.  */
3547 
3548 static void
3549 model_record_pressure (struct model_pressure_group *group,
3550 		       int point, int pci, int pressure)
3551 {
3552   MODEL_REF_PRESSURE (group, point, pci) = pressure;
3553   if (group->limits[pci].pressure < pressure)
3554     {
3555       group->limits[pci].pressure = pressure;
3556       group->limits[pci].point = point;
3557     }
3558 }
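
/* The per-group pressure array allocated in model_init_pressure_group is
   conceptually a two-dimensional (point x pressure-class) table stored
   flat.  A sketch of that addressing scheme on a plain int array, with
   hypothetical names (the real accessors are the MODEL_REF_PRESSURE and
   MODEL_MAX_PRESSURE macros defined earlier in this file):  */
#if 0
/* Look up the entry for instruction POINT and pressure class PCI in a
   flat array holding NUM_CLASSES entries per point.  */
static int
toy_ref_pressure (const int *table, int num_classes, int point, int pci)
{
  return table[point * num_classes + pci];
}
#endif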
3559 
3560 /* INSN has just been added to the end of the model schedule.  Record its
3561    register-pressure information.  */
3562 
3563 static void
3564 model_record_pressures (struct model_insn_info *insn)
3565 {
3566   struct reg_pressure_data *reg_pressure;
3567   int point, pci, cl, delta;
3568   int death[N_REG_CLASSES];
3569 
3570   point = model_index (insn->insn);
3571   if (sched_verbose >= 2)
3572     {
3573       if (point == 0)
3574 	{
3575 	  fprintf (sched_dump, "\n;;\tModel schedule:\n;;\n");
3576 	  fprintf (sched_dump, ";;\t| idx insn | mpri hght dpth prio |\n");
3577 	}
3578       fprintf (sched_dump, ";;\t| %3d %4d | %4d %4d %4d %4d | %-30s ",
3579 	       point, INSN_UID (insn->insn), insn->model_priority,
3580 	       insn->depth + insn->alap, insn->depth,
3581 	       INSN_PRIORITY (insn->insn),
3582 	       str_pattern_slim (PATTERN (insn->insn)));
3583     }
3584   calculate_reg_deaths (insn->insn, death);
3585   reg_pressure = INSN_REG_PRESSURE (insn->insn);
3586   for (pci = 0; pci < ira_pressure_classes_num; pci++)
3587     {
3588       cl = ira_pressure_classes[pci];
3589       delta = reg_pressure[pci].set_increase - death[cl];
3590       if (sched_verbose >= 2)
3591 	fprintf (sched_dump, " %s:[%d,%+d]", reg_class_names[cl],
3592 		 curr_reg_pressure[cl], delta);
3593       model_record_pressure (&model_before_pressure, point, pci,
3594 			     curr_reg_pressure[cl]);
3595     }
3596   if (sched_verbose >= 2)
3597     fprintf (sched_dump, "\n");
3598 }
3599 
3600 /* All instructions have been added to the model schedule.  Record the
3601    final register pressure in GROUP and set up all MODEL_MAX_PRESSUREs.  */
3602 
3603 static void
3604 model_record_final_pressures (struct model_pressure_group *group)
3605 {
3606   int point, pci, max_pressure, ref_pressure, cl;
3607 
3608   for (pci = 0; pci < ira_pressure_classes_num; pci++)
3609     {
3610       /* Record the final pressure for this class.  */
3611       cl = ira_pressure_classes[pci];
3612       point = model_num_insns;
3613       ref_pressure = curr_reg_pressure[cl];
3614       model_record_pressure (group, point, pci, ref_pressure);
3615 
3616       /* Record the original maximum pressure.  */
3617       group->limits[pci].orig_pressure = group->limits[pci].pressure;
3618 
3619       /* Update the MODEL_MAX_PRESSURE for every point of the schedule.  */
3620       max_pressure = ref_pressure;
3621       MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
3622       while (point > 0)
3623 	{
3624 	  point--;
3625 	  ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
3626 	  max_pressure = MAX (max_pressure, ref_pressure);
3627 	  MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
3628 	}
3629     }
3630 }
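
/* The backward walk above is a suffix-maximum computation: afterwards,
   MODEL_MAX_PRESSURE (GROUP, POINT, PCI) holds the maximum of
   MODEL_REF_PRESSURE over all points >= POINT.  The same idea on a
   plain array, with illustrative names:  */
#if 0
/* Fill MAX_OUT[i] with the maximum of REF[i..N-1], walking backward.
   Assumes N >= 1.  */
static void
toy_suffix_max (const int *ref, int *max_out, int n)
{
  int running_max = ref[n - 1];
  max_out[n - 1] = running_max;
  for (int point = n - 2; point >= 0; point--)
    {
      if (ref[point] > running_max)
	running_max = ref[point];
      max_out[point] = running_max;
    }
}
#endif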
3631 
3632 /* Update all successors of INSN, given that INSN has just been scheduled.  */
3633 
3634 static void
3635 model_add_successors_to_worklist (struct model_insn_info *insn)
3636 {
3637   sd_iterator_def sd_it;
3638   struct model_insn_info *con;
3639   dep_t dep;
3640 
3641   FOR_EACH_DEP (insn->insn, SD_LIST_FORW, sd_it, dep)
3642     {
3643       con = MODEL_INSN_INFO (DEP_CON (dep));
3644       /* Ignore debug instructions, and instructions from other blocks.  */
3645       if (con->insn)
3646 	{
3647 	  con->unscheduled_preds--;
3648 
3649 	  /* Update the depth field of each true-dependent successor.
3650 	     Increasing the depth gives them a higher priority than
3651 	     before.  */
3652 	  if (DEP_TYPE (dep) == REG_DEP_TRUE && con->depth < insn->depth + 1)
3653 	    {
3654 	      con->depth = insn->depth + 1;
3655 	      if (QUEUE_INDEX (con->insn) == QUEUE_READY)
3656 		model_promote_insn (con);
3657 	    }
3658 
3659 	  /* If this is a true dependency, or if there are no remaining
3660 	     dependencies for CON (meaning that CON only had non-true
3661 	     dependencies), make sure that CON is on the worklist.
3662 	     We don't bother otherwise because it would tend to fill the
3663 	     worklist with a lot of low-priority instructions that are not
3664 	     yet ready to issue.  */
3665 	  if ((con->depth > 0 || con->unscheduled_preds == 0)
3666 	      && QUEUE_INDEX (con->insn) == QUEUE_NOWHERE)
3667 	    model_add_to_worklist (con, insn, insn->next);
3668 	}
3669     }
3670 }
3671 
3672 /* Give INSN a higher priority than any current instruction, then give
3673    unscheduled predecessors of INSN a higher priority still.  If any of
3674    those predecessors are not on the model worklist, do the same for their
3675    predecessors, and so on.  */
3676 
3677 static void
3678 model_promote_predecessors (struct model_insn_info *insn)
3679 {
3680   struct model_insn_info *pro, *first;
3681   sd_iterator_def sd_it;
3682   dep_t dep;
3683 
3684   if (sched_verbose >= 7)
3685     fprintf (sched_dump, ";;\t+--- priority of %d = %d, priority of",
3686 	     INSN_UID (insn->insn), model_next_priority);
3687   insn->model_priority = model_next_priority++;
3688   model_remove_from_worklist (insn);
3689   model_add_to_worklist_at (insn, NULL);
3690 
3691   first = NULL;
3692   for (;;)
3693     {
3694       FOR_EACH_DEP (insn->insn, SD_LIST_HARD_BACK, sd_it, dep)
3695 	{
3696 	  pro = MODEL_INSN_INFO (DEP_PRO (dep));
3697 	  /* The first test is to ignore debug instructions, and instructions
3698 	     from other blocks.  */
3699 	  if (pro->insn
3700 	      && pro->model_priority != model_next_priority
3701 	      && QUEUE_INDEX (pro->insn) != QUEUE_SCHEDULED)
3702 	    {
3703 	      pro->model_priority = model_next_priority;
3704 	      if (sched_verbose >= 7)
3705 		fprintf (sched_dump, " %d", INSN_UID (pro->insn));
3706 	      if (QUEUE_INDEX (pro->insn) == QUEUE_READY)
3707 		{
3708 		  /* PRO is already in the worklist, but it now has
3709 		     a higher priority than before.  Move it to the
3710 		     appropriate place.  */
3711 		  model_remove_from_worklist (pro);
3712 		  model_add_to_worklist (pro, NULL, model_worklist);
3713 		}
3714 	      else
3715 		{
3716 		  /* PRO isn't in the worklist.  Recursively process
3717 		     its predecessors until we find one that is.  */
3718 		  pro->next = first;
3719 		  first = pro;
3720 		}
3721 	    }
3722 	}
3723       if (!first)
3724 	break;
3725       insn = first;
3726       first = insn->next;
3727     }
3728   if (sched_verbose >= 7)
3729     fprintf (sched_dump, " = %d\n", model_next_priority);
3730   model_next_priority++;
3731 }
3732 
3733 /* Pick one instruction from model_worklist and process it.  */
3734 
3735 static void
3736 model_choose_insn (void)
3737 {
3738   struct model_insn_info *insn, *fallback;
3739   int count;
3740 
3741   if (sched_verbose >= 7)
3742     {
3743       fprintf (sched_dump, ";;\t+--- worklist:\n");
3744       insn = model_worklist;
3745       count = MAX_SCHED_READY_INSNS;
3746       while (count > 0 && insn)
3747 	{
3748 	  fprintf (sched_dump, ";;\t+---   %d [%d, %d, %d, %d]\n",
3749 		   INSN_UID (insn->insn), insn->model_priority,
3750 		   insn->depth + insn->alap, insn->depth,
3751 		   INSN_PRIORITY (insn->insn));
3752 	  count--;
3753 	  insn = insn->next;
3754 	}
3755     }
3756 
3757   /* Look for a ready instruction whose model_classify_pressure is zero
3758      or negative, picking the highest-priority one.  Adding such an
3759      instruction to the schedule now should do no harm, and may actually
3760      do some good.
3761 
3762      Failing that, see whether there is an instruction with the highest
3763      extant model_priority that is not yet ready, but which would reduce
3764      pressure if it became ready.  This is designed to catch cases like:
3765 
3766        (set (mem (reg R1)) (reg R2))
3767 
3768      where the instruction is the last remaining use of R1 and where the
3769      value of R2 is not yet available (or vice versa).  The death of R1
3770      means that this instruction already reduces pressure.  It is of
3771      course possible that the computation of R2 involves other registers
3772      that are hard to kill, but such cases are rare enough for this
3773      heuristic to be a win in general.
3774 
3775      Failing that, just pick the highest-priority instruction in the
3776      worklist.  */
3777   count = MAX_SCHED_READY_INSNS;
3778   insn = model_worklist;
3779   fallback = 0;
3780   for (;;)
3781     {
3782       if (count == 0 || !insn)
3783 	{
3784 	  insn = fallback ? fallback : model_worklist;
3785 	  break;
3786 	}
3787       if (insn->unscheduled_preds)
3788 	{
3789 	  if (model_worklist->model_priority == insn->model_priority
3790 	      && !fallback
3791 	      && model_classify_pressure (insn) < 0)
3792 	    fallback = insn;
3793 	}
3794       else
3795 	{
3796 	  if (model_classify_pressure (insn) <= 0)
3797 	    break;
3798 	}
3799       count--;
3800       insn = insn->next;
3801     }
3802 
3803   if (sched_verbose >= 7 && insn != model_worklist)
3804     {
3805       if (insn->unscheduled_preds)
3806 	fprintf (sched_dump, ";;\t+--- promoting insn %d, with dependencies\n",
3807 		 INSN_UID (insn->insn));
3808       else
3809 	fprintf (sched_dump, ";;\t+--- promoting insn %d, which is ready\n",
3810 		 INSN_UID (insn->insn));
3811     }
3812   if (insn->unscheduled_preds)
3813     /* INSN isn't yet ready to issue.  Give all its predecessors the
3814        highest priority.  */
3815     model_promote_predecessors (insn);
3816   else
3817     {
3818       /* INSN is ready.  Add it to the end of model_schedule and
3819 	 process its successors.  */
3820       model_add_successors_to_worklist (insn);
3821       model_remove_from_worklist (insn);
3822       model_add_to_schedule (insn->insn);
3823       model_record_pressures (insn);
3824       update_register_pressure (insn->insn);
3825     }
3826 }
3827 
3828 /* Restore all QUEUE_INDEXs to the values that they had before
3829    model_start_schedule was called.  */
3830 
3831 static void
3832 model_reset_queue_indices (void)
3833 {
3834   unsigned int i;
3835   rtx_insn *insn;
3836 
3837   FOR_EACH_VEC_ELT (model_schedule, i, insn)
3838     QUEUE_INDEX (insn) = MODEL_INSN_INFO (insn)->old_queue;
3839 }
3840 
3841 /* We have calculated the model schedule and spill costs.  Print a summary
3842    to sched_dump.  */
3843 
3844 static void
3845 model_dump_pressure_summary (void)
3846 {
3847   int pci, cl;
3848 
3849   fprintf (sched_dump, ";; Pressure summary:");
3850   for (pci = 0; pci < ira_pressure_classes_num; pci++)
3851     {
3852       cl = ira_pressure_classes[pci];
3853       fprintf (sched_dump, " %s:%d", reg_class_names[cl],
3854 	       model_before_pressure.limits[pci].pressure);
3855     }
3856   fprintf (sched_dump, "\n\n");
3857 }
3858 
3859 /* Initialize the SCHED_PRESSURE_MODEL information for the current
3860    scheduling region.  */
3861 
3862 static void
3863 model_start_schedule (basic_block bb)
3864 {
3865   model_next_priority = 1;
3866   model_schedule.create (sched_max_luid);
3867   model_insns = XCNEWVEC (struct model_insn_info, sched_max_luid);
3868 
3869   gcc_assert (bb == BLOCK_FOR_INSN (NEXT_INSN (current_sched_info->prev_head)));
3870   initiate_reg_pressure_info (df_get_live_in (bb));
3871 
3872   model_analyze_insns ();
3873   model_init_pressure_group (&model_before_pressure);
3874   while (model_worklist)
3875     model_choose_insn ();
3876   gcc_assert (model_num_insns == (int) model_schedule.length ());
3877   if (sched_verbose >= 2)
3878     fprintf (sched_dump, "\n");
3879 
3880   model_record_final_pressures (&model_before_pressure);
3881   model_reset_queue_indices ();
3882 
3883   XDELETEVEC (model_insns);
3884 
3885   model_curr_point = 0;
3886   initiate_reg_pressure_info (df_get_live_in (bb));
3887   if (sched_verbose >= 1)
3888     model_dump_pressure_summary ();
3889 }
3890 
3891 /* Free the information associated with GROUP.  */
3892 
3893 static void
3894 model_finalize_pressure_group (struct model_pressure_group *group)
3895 {
3896   XDELETEVEC (group->model);
3897 }
3898 
3899 /* Free the information created by model_start_schedule.  */
3900 
3901 static void
3902 model_end_schedule (void)
3903 {
3904   model_finalize_pressure_group (&model_before_pressure);
3905   model_schedule.release ();
3906 }
3907 
3908 /* Prepare reg pressure scheduling for basic block BB.  */
3909 static void
3910 sched_pressure_start_bb (basic_block bb)
3911 {
3912   /* Set the number of available registers for each class taking into account
3913      relative probability of current basic block versus function prologue and
3914      epilogue.
3915      * If the basic block executes much more often than the prologue/epilogue
3916      (e.g., inside a hot loop), then the cost of a spill in the prologue is close to
3917      nil, so the effective number of available registers is
3918      (ira_class_hard_regs_num[cl] - 0).
3919      * If the basic block executes as often as the prologue/epilogue,
3920      then spill in the block is as costly as in the prologue, so the effective
3921      number of available registers is
3922      (ira_class_hard_regs_num[cl] - call_used_regs_num[cl]).
3923      Note that all-else-equal, we prefer to spill in the prologue, since that
3924      allows "extra" registers for other basic blocks of the function.
3925      * If the basic block is on the cold path of the function and executes
3926      rarely, then we should always prefer to spill in the block, rather than
3927      in the prologue/epilogue.  The effective number of available registers is
3928      (ira_class_hard_regs_num[cl] - call_used_regs_num[cl]).  */
3929   {
3930     int i;
3931     int entry_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
3932     int bb_freq = bb->frequency;
3933 
3934     if (bb_freq == 0)
3935       {
3936 	if (entry_freq == 0)
3937 	  entry_freq = bb_freq = 1;
3938       }
3939     if (bb_freq < entry_freq)
3940       bb_freq = entry_freq;
3941 
3942     for (i = 0; i < ira_pressure_classes_num; ++i)
3943       {
3944 	enum reg_class cl = ira_pressure_classes[i];
3945 	sched_class_regs_num[cl] = ira_class_hard_regs_num[cl];
3946 	sched_class_regs_num[cl]
3947 	  -= (call_used_regs_num[cl] * entry_freq) / bb_freq;
3948       }
3949   }
3950 
3951   if (sched_pressure == SCHED_PRESSURE_MODEL)
3952     model_start_schedule (bb);
3953 }
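
/* A worked example of the formula above, with illustrative numbers: for
   a class with 16 hard registers of which 8 are call-used, a block four
   times as frequent as the entry block gets 16 - (8 * 1) / 4 == 14
   effective registers, whereas a block no more frequent than the entry
   block (bb_freq is clamped up to entry_freq) gets 16 - 8 == 8.  */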
3954 
3955 /* A structure that holds local state for the loop in schedule_block.  */
3956 struct sched_block_state
3957 {
3958   /* True if no real insns have been scheduled in the current cycle.  */
3959   bool first_cycle_insn_p;
3960   /* True if a shadow insn has been scheduled in the current cycle, which
3961      means that no more normal insns can be issued.  */
3962   bool shadows_only_p;
3963   /* True if we're winding down a modulo schedule, which means that we only
3964      issue insns with INSN_EXACT_TICK set.  */
3965   bool modulo_epilogue;
3966   /* Initialized with the machine's issue rate every cycle, and updated
3967      by calls to the variable_issue hook.  */
3968   int can_issue_more;
3969 };
3970 
3971 /* INSN is the "currently executing insn".  Launch each insn which was
3972    waiting on INSN.  READY is the ready list which contains the insns
3973    that are ready to fire.  CLOCK is the current cycle.  The function
3974    returns the necessary cycle advance after issuing the insn (it is not
3975    zero for insns in a schedule group).  */
3976 
3977 static int
3978 schedule_insn (rtx_insn *insn)
3979 {
3980   sd_iterator_def sd_it;
3981   dep_t dep;
3982   int i;
3983   int advance = 0;
3984 
3985   if (sched_verbose >= 1)
3986     {
3987       struct reg_pressure_data *pressure_info;
3988       fprintf (sched_dump, ";;\t%3i--> %s %-40s:",
3989 	       clock_var, (*current_sched_info->print_insn) (insn, 1),
3990 	       str_pattern_slim (PATTERN (insn)));
3991 
3992       if (recog_memoized (insn) < 0)
3993 	fprintf (sched_dump, "nothing");
3994       else
3995 	print_reservation (sched_dump, insn);
3996       pressure_info = INSN_REG_PRESSURE (insn);
3997       if (pressure_info != NULL)
3998 	{
3999 	  fputc (':', sched_dump);
4000 	  for (i = 0; i < ira_pressure_classes_num; i++)
4001 	    fprintf (sched_dump, "%s%s%+d(%d)",
4002 		     scheduled_insns.length () > 1
4003 		     && INSN_LUID (insn)
4004 		     < INSN_LUID (scheduled_insns[scheduled_insns.length () - 2]) ? "@" : "",
4005 		     reg_class_names[ira_pressure_classes[i]],
4006 		     pressure_info[i].set_increase, pressure_info[i].change);
4007 	}
4008       if (sched_pressure == SCHED_PRESSURE_MODEL
4009 	  && model_curr_point < model_num_insns
4010 	  && model_index (insn) == model_curr_point)
4011 	fprintf (sched_dump, ":model %d", model_curr_point);
4012       fputc ('\n', sched_dump);
4013     }
4014 
4015   if (sched_pressure == SCHED_PRESSURE_WEIGHTED && !DEBUG_INSN_P (insn))
4016     update_reg_and_insn_max_reg_pressure (insn);
4017 
4018   /* The instruction being scheduled should have all its dependencies
4019      resolved and should have been removed from the ready list.  */
4020   gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
4021 
4022   /* Reset debug insns invalidated by moving this insn.  */
4023   if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
4024     for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
4025 	 sd_iterator_cond (&sd_it, &dep);)
4026       {
4027 	rtx_insn *dbg = DEP_PRO (dep);
4028 	struct reg_use_data *use, *next;
4029 
4030 	if (DEP_STATUS (dep) & DEP_CANCELLED)
4031 	  {
4032 	    sd_iterator_next (&sd_it);
4033 	    continue;
4034 	  }
4035 
4036 	gcc_assert (DEBUG_INSN_P (dbg));
4037 
4038 	if (sched_verbose >= 6)
4039 	  fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
4040 		   INSN_UID (dbg));
4041 
4042 	/* ??? Rather than resetting the debug insn, we might be able
4043 	   to emit a debug temp before the just-scheduled insn, but
4044 	   this would involve checking that the expression at the
4045 	   point of the debug insn is equivalent to the expression
4046 	   before the just-scheduled insn.  They might not be: the
4047 	   expression in the debug insn may depend on other insns not
4048 	   yet scheduled that set MEMs, REGs or even other debug
4049 	   insns.  It's not clear that attempting to preserve debug
4050 	   information in these cases is worth the effort, given how
4051 	   uncommon these resets are and the likelihood that the debug
4052 	   temps introduced won't survive the schedule change.  */
4053 	INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
4054 	df_insn_rescan (dbg);
4055 
4056 	/* Unknown location doesn't use any registers.  */
4057 	for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
4058 	  {
4059 	    struct reg_use_data *prev = use;
4060 
4061 	    /* Remove use from the cyclic next_regno_use chain first.  */
4062 	    while (prev->next_regno_use != use)
4063 	      prev = prev->next_regno_use;
4064 	    prev->next_regno_use = use->next_regno_use;
4065 	    next = use->next_insn_use;
4066 	    free (use);
4067 	  }
4068 	INSN_REG_USE_LIST (dbg) = NULL;
4069 
4070 	/* We delete rather than resolve these deps, otherwise we
4071 	   crash in sched_free_deps(), because forward deps are
4072 	   expected to be released before backward deps.  */
4073 	sd_delete_dep (sd_it);
4074       }
4075 
4076   gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
4077   QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
4078 
4079   if (sched_pressure == SCHED_PRESSURE_MODEL
4080       && model_curr_point < model_num_insns
4081       && NONDEBUG_INSN_P (insn))
4082     {
4083       if (model_index (insn) == model_curr_point)
4084 	do
4085 	  model_curr_point++;
4086 	while (model_curr_point < model_num_insns
4087 	       && (QUEUE_INDEX (MODEL_INSN (model_curr_point))
4088 		   == QUEUE_SCHEDULED));
4089       else
4090 	model_recompute (insn);
4091       model_update_limit_points ();
4092       update_register_pressure (insn);
4093       if (sched_verbose >= 2)
4094 	print_curr_reg_pressure ();
4095     }
4096 
4097   gcc_assert (INSN_TICK (insn) >= MIN_TICK);
4098   if (INSN_TICK (insn) > clock_var)
4099     /* INSN has been prematurely moved from the queue to the ready list.
4100        This is possible only if the following flags are set.  */
4101     gcc_assert (flag_sched_stalled_insns || sched_fusion);
4102 
4103   /* ??? Probably, if INSN is scheduled prematurely, we should leave
4104      INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
4105   INSN_TICK (insn) = clock_var;
4106 
4107   check_clobbered_conditions (insn);
4108 
4109   /* Update dependent instructions.  First, see if by scheduling this insn
4110      now we broke a dependence in a way that requires us to change another
4111      insn.  */
4112   for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4113        sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
4114     {
4115       struct dep_replacement *desc = DEP_REPLACE (dep);
4116       rtx_insn *pro = DEP_PRO (dep);
4117       if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED
4118 	  && desc != NULL && desc->insn == pro)
4119 	apply_replacement (dep, false);
4120     }
4121 
4122   /* Go through and resolve forward dependencies.  */
4123   for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4124        sd_iterator_cond (&sd_it, &dep);)
4125     {
4126       rtx_insn *next = DEP_CON (dep);
4127       bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
4128 
4129       /* Resolve the dependence between INSN and NEXT.
4130 	 sd_resolve_dep () moves current dep to another list thus
4131 	 advancing the iterator.  */
4132       sd_resolve_dep (sd_it);
4133 
4134       if (cancelled)
4135 	{
4136 	  if (must_restore_pattern_p (next, dep))
4137 	    restore_pattern (dep, false);
4138 	  continue;
4139 	}
4140 
4141       /* Don't bother trying to mark next as ready if insn is a debug
4142 	 insn.  If insn is the last hard dependency, it will have
4143 	 already been discounted.  */
4144       if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
4145 	continue;
4146 
4147       if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4148 	{
4149 	  int effective_cost;
4150 
4151 	  effective_cost = try_ready (next);
4152 
4153 	  if (effective_cost >= 0
4154 	      && SCHED_GROUP_P (next)
4155 	      && advance < effective_cost)
4156 	    advance = effective_cost;
4157 	}
4158       else
4159 	/* Check always has only one forward dependence (to the first insn in
4160 	   the recovery block), therefore, this will be executed only once.  */
4161 	{
4162 	  gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
4163 	  fix_recovery_deps (RECOVERY_BLOCK (insn));
4164 	}
4165     }
4166 
4167   /* Annotate the instruction with issue information -- TImode
4168      indicates that the instruction is expected not to be able
4169      to issue on the same cycle as the previous insn.  A machine
4170      may use this information to decide how the instruction should
4171      be aligned.  */
4172   if (issue_rate > 1
4173       && GET_CODE (PATTERN (insn)) != USE
4174       && GET_CODE (PATTERN (insn)) != CLOBBER
4175       && !DEBUG_INSN_P (insn))
4176     {
4177       if (reload_completed)
4178 	PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
4179       last_clock_var = clock_var;
4180     }
4181 
4182   if (nonscheduled_insns_begin != NULL_RTX)
4183     /* Indicate to debug counters that INSN is scheduled.  */
4184     nonscheduled_insns_begin = insn;
4185 
4186   return advance;
4187 }
4188 
4189 /* Functions for handling of notes.  */
4190 
4191 /* Add note list that ends on FROM_END to the end of TO_ENDP.  */
4192 void
4193 concat_note_lists (rtx_insn *from_end, rtx_insn **to_endp)
4194 {
4195   rtx_insn *from_start;
4196 
4197   /* It's easy when we have nothing to concat.  */
4198   if (from_end == NULL)
4199     return;
4200 
4201   /* It's also easy when the destination is empty.  */
4202   if (*to_endp == NULL)
4203     {
4204       *to_endp = from_end;
4205       return;
4206     }
4207 
4208   from_start = from_end;
4209   while (PREV_INSN (from_start) != NULL)
4210     from_start = PREV_INSN (from_start);
4211 
4212   SET_PREV_INSN (from_start) = *to_endp;
4213   SET_NEXT_INSN (*to_endp) = from_start;
4214   *to_endp = from_end;
4215 }
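
/* For example, if *TO_ENDP currently ends the chain A <-> B and FROM_END
   is the last insn of the chain C <-> D, the function walks back from D
   to C, links B <-> C, and leaves *TO_ENDP pointing at D, yielding
   A <-> B <-> C <-> D.  */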
4216 
4217 /* Delete notes between HEAD and TAIL and put them in the chain
4218    of notes ended by NOTE_LIST.  */
4219 void
4220 remove_notes (rtx_insn *head, rtx_insn *tail)
4221 {
4222   rtx_insn *next_tail, *insn, *next;
4223 
4224   note_list = 0;
4225   if (head == tail && !INSN_P (head))
4226     return;
4227 
4228   next_tail = NEXT_INSN (tail);
4229   for (insn = head; insn != next_tail; insn = next)
4230     {
4231       next = NEXT_INSN (insn);
4232       if (!NOTE_P (insn))
4233 	continue;
4234 
4235       switch (NOTE_KIND (insn))
4236 	{
4237 	case NOTE_INSN_BASIC_BLOCK:
4238 	  continue;
4239 
4240 	case NOTE_INSN_EPILOGUE_BEG:
4241 	  if (insn != tail)
4242 	    {
4243 	      remove_insn (insn);
4244 	      add_reg_note (next, REG_SAVE_NOTE,
4245 			    GEN_INT (NOTE_INSN_EPILOGUE_BEG));
4246 	      break;
4247 	    }
4248 	  /* FALLTHRU */
4249 
4250 	default:
4251 	  remove_insn (insn);
4252 
4253 	  /* Add the note to list that ends at NOTE_LIST.  */
4254 	  SET_PREV_INSN (insn) = note_list;
4255 	  SET_NEXT_INSN (insn) = NULL_RTX;
4256 	  if (note_list)
4257 	    SET_NEXT_INSN (note_list) = insn;
4258 	  note_list = insn;
4259 	  break;
4260 	}
4261 
4262       gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
4263     }
4264 }
4265 
4266 /* A structure to record enough data to allow us to backtrack the scheduler to
4267    a previous state.  */
4268 struct haifa_saved_data
4269 {
4270   /* Next entry on the list.  */
4271   struct haifa_saved_data *next;
4272 
4273   /* Backtracking is associated with scheduling insns that have delay slots.
4274      DELAY_PAIR points to the structure that contains the insns involved, and
4275      the number of cycles between them.  */
4276   struct delay_pair *delay_pair;
4277 
4278   /* Data used by the frontend (e.g. sched-ebb or sched-rgn).  */
4279   void *fe_saved_data;
4280   /* Data used by the backend.  */
4281   void *be_saved_data;
4282 
4283   /* Copies of global state.  */
4284   int clock_var, last_clock_var;
4285   struct ready_list ready;
4286   state_t curr_state;
4287 
4288   rtx_insn *last_scheduled_insn;
4289   rtx_insn *last_nondebug_scheduled_insn;
4290   rtx_insn *nonscheduled_insns_begin;
4291   int cycle_issued_insns;
4292 
4293   /* Copies of state used in the inner loop of schedule_block.  */
4294   struct sched_block_state sched_block;
4295 
4296   /* We don't need to save q_ptr, as its value is arbitrary and we can set it
4297      to 0 when restoring.  */
4298   int q_size;
4299   rtx_insn_list **insn_queue;
4300 
4301   /* Describe pattern replacements that occurred since this backtrack point
4302      was queued.  */
4303   vec<dep_t> replacement_deps;
4304   vec<int> replace_apply;
4305 
4306   /* A copy of the next-cycle replacement vectors at the time of the backtrack
4307      point.  */
4308   vec<dep_t> next_cycle_deps;
4309   vec<int> next_cycle_apply;
4310 };
4311 
4312 /* A record, in reverse order, of all scheduled insns which have delay slots
4313    and may require backtracking.  */
4314 static struct haifa_saved_data *backtrack_queue;
4315 
4316 /* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
4317    to SET_P.  */
4318 static void
4319 mark_backtrack_feeds (rtx_insn *insn, int set_p)
4320 {
4321   sd_iterator_def sd_it;
4322   dep_t dep;
4323   FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
4324     {
4325       FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
4326     }
4327 }
4328 
4329 /* Save the current scheduler state so that we can backtrack to it
4330    later if necessary.  PAIR gives the insns that make it necessary to
4331    save this point.  SCHED_BLOCK is the local state of schedule_block
4332    that needs to be saved.  */
4333 static void
4334 save_backtrack_point (struct delay_pair *pair,
4335 		      struct sched_block_state sched_block)
4336 {
4337   int i;
4338   struct haifa_saved_data *save = XNEW (struct haifa_saved_data);
4339 
4340   save->curr_state = xmalloc (dfa_state_size);
4341   memcpy (save->curr_state, curr_state, dfa_state_size);
4342 
4343   save->ready.first = ready.first;
4344   save->ready.n_ready = ready.n_ready;
4345   save->ready.n_debug = ready.n_debug;
4346   save->ready.veclen = ready.veclen;
4347   save->ready.vec = XNEWVEC (rtx_insn *, ready.veclen);
4348   memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));
4349 
4350   save->insn_queue = XNEWVEC (rtx_insn_list *, max_insn_queue_index + 1);
4351   save->q_size = q_size;
4352   for (i = 0; i <= max_insn_queue_index; i++)
4353     {
4354       int q = NEXT_Q_AFTER (q_ptr, i);
4355       save->insn_queue[i] = copy_INSN_LIST (insn_queue[q]);
4356     }
4357 
4358   save->clock_var = clock_var;
4359   save->last_clock_var = last_clock_var;
4360   save->cycle_issued_insns = cycle_issued_insns;
4361   save->last_scheduled_insn = last_scheduled_insn;
4362   save->last_nondebug_scheduled_insn = last_nondebug_scheduled_insn;
4363   save->nonscheduled_insns_begin = nonscheduled_insns_begin;
4364 
4365   save->sched_block = sched_block;
4366 
4367   save->replacement_deps.create (0);
4368   save->replace_apply.create (0);
4369   save->next_cycle_deps = next_cycle_replace_deps.copy ();
4370   save->next_cycle_apply = next_cycle_apply.copy ();
4371 
4372   if (current_sched_info->save_state)
4373     save->fe_saved_data = (*current_sched_info->save_state) ();
4374 
4375   if (targetm.sched.alloc_sched_context)
4376     {
4377       save->be_saved_data = targetm.sched.alloc_sched_context ();
4378       targetm.sched.init_sched_context (save->be_saved_data, false);
4379     }
4380   else
4381     save->be_saved_data = NULL;
4382 
4383   save->delay_pair = pair;
4384 
4385   save->next = backtrack_queue;
4386   backtrack_queue = save;
4387 
4388   while (pair)
4389     {
4390       mark_backtrack_feeds (pair->i2, 1);
4391       INSN_TICK (pair->i2) = INVALID_TICK;
4392       INSN_EXACT_TICK (pair->i2) = clock_var + pair_delay (pair);
4393       SHADOW_P (pair->i2) = pair->stages == 0;
4394       pair = pair->next_same_i1;
4395     }
4396 }
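
/* Note that the queue is saved in normalized form: slot I of
   SAVE->insn_queue holds the insns due I cycles from now (that is,
   bucket NEXT_Q_AFTER (q_ptr, I)), which is what lets
   restore_last_backtrack_point simply reset q_ptr to zero.  */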
4397 
4398 /* Walk the ready list and all queues. If any insns have unresolved backwards
4399    dependencies, these must be cancelled deps, broken by predication.  Set or
4400    clear (depending on SET) the DEP_CANCELLED bit in DEP_STATUS.  */
4401 
4402 static void
4403 toggle_cancelled_flags (bool set)
4404 {
4405   int i;
4406   sd_iterator_def sd_it;
4407   dep_t dep;
4408 
4409   if (ready.n_ready > 0)
4410     {
4411       rtx_insn **first = ready_lastpos (&ready);
4412       for (i = 0; i < ready.n_ready; i++)
4413 	FOR_EACH_DEP (first[i], SD_LIST_BACK, sd_it, dep)
4414 	  if (!DEBUG_INSN_P (DEP_PRO (dep)))
4415 	    {
4416 	      if (set)
4417 		DEP_STATUS (dep) |= DEP_CANCELLED;
4418 	      else
4419 		DEP_STATUS (dep) &= ~DEP_CANCELLED;
4420 	    }
4421     }
4422   for (i = 0; i <= max_insn_queue_index; i++)
4423     {
4424       int q = NEXT_Q_AFTER (q_ptr, i);
4425       rtx_insn_list *link;
4426       for (link = insn_queue[q]; link; link = link->next ())
4427 	{
4428 	  rtx_insn *insn = link->insn ();
4429 	  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4430 	    if (!DEBUG_INSN_P (DEP_PRO (dep)))
4431 	      {
4432 		if (set)
4433 		  DEP_STATUS (dep) |= DEP_CANCELLED;
4434 		else
4435 		  DEP_STATUS (dep) &= ~DEP_CANCELLED;
4436 	      }
4437 	}
4438     }
4439 }
4440 
4441 /* Undo the replacements that have occurred after backtrack point SAVE
4442    was placed.  */
4443 static void
4444 undo_replacements_for_backtrack (struct haifa_saved_data *save)
4445 {
4446   while (!save->replacement_deps.is_empty ())
4447     {
4448       dep_t dep = save->replacement_deps.pop ();
4449       int apply_p = save->replace_apply.pop ();
4450 
4451       if (apply_p)
4452 	restore_pattern (dep, true);
4453       else
4454 	apply_replacement (dep, true);
4455     }
4456   save->replacement_deps.release ();
4457   save->replace_apply.release ();
4458 }
4459 
4460 /* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
4461    Restore their dependencies to an unresolved state, and mark them as
4462    queued nowhere.  */
4463 
4464 static void
4465 unschedule_insns_until (rtx_insn *insn)
4466 {
4467   auto_vec<rtx_insn *> recompute_vec;
4468 
4469   /* Make two passes over the insns to be unscheduled.  First, we clear out
4470      dependencies and other trivial bookkeeping.  */
4471   for (;;)
4472     {
4473       rtx_insn *last;
4474       sd_iterator_def sd_it;
4475       dep_t dep;
4476 
4477       last = scheduled_insns.pop ();
4478 
4479       /* This will be changed by restore_last_backtrack_point if the insn
4480 	 is in any queue.  */
4481       QUEUE_INDEX (last) = QUEUE_NOWHERE;
4482       if (last != insn)
4483 	INSN_TICK (last) = INVALID_TICK;
4484 
4485       if (modulo_ii > 0 && INSN_UID (last) < modulo_iter0_max_uid)
4486 	modulo_insns_scheduled--;
4487 
4488       for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
4489 	   sd_iterator_cond (&sd_it, &dep);)
4490 	{
4491 	  rtx_insn *con = DEP_CON (dep);
4492 	  sd_unresolve_dep (sd_it);
4493 	  if (!MUST_RECOMPUTE_SPEC_P (con))
4494 	    {
4495 	      MUST_RECOMPUTE_SPEC_P (con) = 1;
4496 	      recompute_vec.safe_push (con);
4497 	    }
4498 	}
4499 
4500       if (last == insn)
4501 	break;
4502     }
4503 
4504   /* A second pass, to update ready and speculation status for insns
4505      depending on the unscheduled ones.  The first pass must have
4506      popped the scheduled_insns vector up to the point where we
4507      restart scheduling, as recompute_todo_spec requires it to be
4508      up-to-date.  */
4509   while (!recompute_vec.is_empty ())
4510     {
4511       rtx_insn *con;
4512 
4513       con = recompute_vec.pop ();
4514       MUST_RECOMPUTE_SPEC_P (con) = 0;
4515       if (!sd_lists_empty_p (con, SD_LIST_HARD_BACK))
4516 	{
4517 	  TODO_SPEC (con) = HARD_DEP;
4518 	  INSN_TICK (con) = INVALID_TICK;
4519 	  if (PREDICATED_PAT (con) != NULL_RTX)
4520 	    haifa_change_pattern (con, ORIG_PAT (con));
4521 	}
4522       else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
4523 	TODO_SPEC (con) = recompute_todo_spec (con, true);
4524     }
4525 }
4526 
4527 /* Restore scheduler state from the topmost entry on the backtracking queue.
4528    PSCHED_BLOCK_P points to the local data of schedule_block that we must
4529    overwrite with the saved data.
4530    The caller must already have called unschedule_insns_until.  */
4531 
4532 static void
4533 restore_last_backtrack_point (struct sched_block_state *psched_block)
4534 {
4535   int i;
4536   struct haifa_saved_data *save = backtrack_queue;
4537 
4538   backtrack_queue = save->next;
4539 
4540   if (current_sched_info->restore_state)
4541     (*current_sched_info->restore_state) (save->fe_saved_data);
4542 
4543   if (targetm.sched.alloc_sched_context)
4544     {
4545       targetm.sched.set_sched_context (save->be_saved_data);
4546       targetm.sched.free_sched_context (save->be_saved_data);
4547     }
4548 
4549   /* Do this first since it clobbers INSN_TICK of the involved
4550      instructions.  */
4551   undo_replacements_for_backtrack (save);
4552 
4553   /* Clear the QUEUE_INDEX of everything in the ready list or one
4554      of the queues.  */
4555   if (ready.n_ready > 0)
4556     {
4557       rtx_insn **first = ready_lastpos (&ready);
4558       for (i = 0; i < ready.n_ready; i++)
4559 	{
4560 	  rtx_insn *insn = first[i];
4561 	  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
4562 	  INSN_TICK (insn) = INVALID_TICK;
4563 	}
4564     }
4565   for (i = 0; i <= max_insn_queue_index; i++)
4566     {
4567       int q = NEXT_Q_AFTER (q_ptr, i);
4568 
4569       for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
4570 	{
4571 	  rtx_insn *x = link->insn ();
4572 	  QUEUE_INDEX (x) = QUEUE_NOWHERE;
4573 	  INSN_TICK (x) = INVALID_TICK;
4574 	}
4575       free_INSN_LIST_list (&insn_queue[q]);
4576     }
4577 
4578   free (ready.vec);
4579   ready = save->ready;
4580 
4581   if (ready.n_ready > 0)
4582     {
4583       rtx_insn **first = ready_lastpos (&ready);
4584       for (i = 0; i < ready.n_ready; i++)
4585 	{
4586 	  rtx_insn *insn = first[i];
4587 	  QUEUE_INDEX (insn) = QUEUE_READY;
4588 	  TODO_SPEC (insn) = recompute_todo_spec (insn, true);
4589 	  INSN_TICK (insn) = save->clock_var;
4590 	}
4591     }
4592 
4593   q_ptr = 0;
4594   q_size = save->q_size;
4595   for (i = 0; i <= max_insn_queue_index; i++)
4596     {
4597       int q = NEXT_Q_AFTER (q_ptr, i);
4598 
4599       insn_queue[q] = save->insn_queue[q];
4600 
4601       for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
4602 	{
4603 	  rtx_insn *x = link->insn ();
4604 	  QUEUE_INDEX (x) = i;
4605 	  TODO_SPEC (x) = recompute_todo_spec (x, true);
4606 	  INSN_TICK (x) = save->clock_var + i;
4607 	}
4608     }
4609   free (save->insn_queue);
4610 
4611   toggle_cancelled_flags (true);
4612 
4613   clock_var = save->clock_var;
4614   last_clock_var = save->last_clock_var;
4615   cycle_issued_insns = save->cycle_issued_insns;
4616   last_scheduled_insn = save->last_scheduled_insn;
4617   last_nondebug_scheduled_insn = save->last_nondebug_scheduled_insn;
4618   nonscheduled_insns_begin = save->nonscheduled_insns_begin;
4619 
4620   *psched_block = save->sched_block;
4621 
4622   memcpy (curr_state, save->curr_state, dfa_state_size);
4623   free (save->curr_state);
4624 
4625   mark_backtrack_feeds (save->delay_pair->i2, 0);
4626 
4627   gcc_assert (next_cycle_replace_deps.is_empty ());
4628   next_cycle_replace_deps = save->next_cycle_deps.copy ();
4629   next_cycle_apply = save->next_cycle_apply.copy ();
4630 
4631   free (save);
4632 
4633   for (save = backtrack_queue; save; save = save->next)
4634     {
4635       mark_backtrack_feeds (save->delay_pair->i2, 1);
4636     }
4637 }
4638 
4639 /* Discard all data associated with the topmost entry in the backtrack
4640    queue.  If RESET_TICK is false, we just want to free the data.  If true,
4641    we are doing this because we discovered a reason to backtrack.  In the
4642    latter case, also reset the INSN_TICK for the shadow insn.  */
4643 static void
4644 free_topmost_backtrack_point (bool reset_tick)
4645 {
4646   struct haifa_saved_data *save = backtrack_queue;
4647   int i;
4648 
4649   backtrack_queue = save->next;
4650 
4651   if (reset_tick)
4652     {
4653       struct delay_pair *pair = save->delay_pair;
4654       while (pair)
4655 	{
4656 	  INSN_TICK (pair->i2) = INVALID_TICK;
4657 	  INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
4658 	  pair = pair->next_same_i1;
4659 	}
4660       undo_replacements_for_backtrack (save);
4661     }
4662   else
4663     {
4664       save->replacement_deps.release ();
4665       save->replace_apply.release ();
4666     }
4667 
4668   if (targetm.sched.free_sched_context)
4669     targetm.sched.free_sched_context (save->be_saved_data);
4670   if (current_sched_info->restore_state)
4671     free (save->fe_saved_data);
4672   for (i = 0; i <= max_insn_queue_index; i++)
4673     free_INSN_LIST_list (&save->insn_queue[i]);
4674   free (save->insn_queue);
4675   free (save->curr_state);
4676   free (save->ready.vec);
4677   free (save);
4678 }
4679 
4680 /* Free the entire backtrack queue.  */
4681 static void
4682 free_backtrack_queue (void)
4683 {
4684   while (backtrack_queue)
4685     free_topmost_backtrack_point (false);
4686 }
4687 
4688 /* Apply a replacement described by DESC.  If IMMEDIATELY is false, we
4689    may have to postpone the replacement until the start of the next cycle,
4690    at which point we will be called again with IMMEDIATELY true.  This is
4691    only done, however, for machines which have instruction packets with
4692    explicit parallelism.  */
4693 static void
4694 apply_replacement (dep_t dep, bool immediately)
4695 {
4696   struct dep_replacement *desc = DEP_REPLACE (dep);
4697   if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4698     {
4699       next_cycle_replace_deps.safe_push (dep);
4700       next_cycle_apply.safe_push (1);
4701     }
4702   else
4703     {
4704       bool success;
4705 
4706       if (QUEUE_INDEX (desc->insn) == QUEUE_SCHEDULED)
4707 	return;
4708 
4709       if (sched_verbose >= 5)
4710 	fprintf (sched_dump, "applying replacement for insn %d\n",
4711 		 INSN_UID (desc->insn));
4712 
4713       success = validate_change (desc->insn, desc->loc, desc->newval, 0);
4714       gcc_assert (success);
4715 
4716       update_insn_after_change (desc->insn);
4717       if ((TODO_SPEC (desc->insn) & (HARD_DEP | DEP_POSTPONED)) == 0)
4718 	fix_tick_ready (desc->insn);
4719 
4720       if (backtrack_queue != NULL)
4721 	{
4722 	  backtrack_queue->replacement_deps.safe_push (dep);
4723 	  backtrack_queue->replace_apply.safe_push (1);
4724 	}
4725     }
4726 }
4727 
4728 /* We have determined that a pattern involved in DEP must be restored.
4729    If IMMEDIATELY is false, we may have to postpone the replacement
4730    until the start of the next cycle, at which point we will be called
4731    again with IMMEDIATELY true.  */
4732 static void
4733 restore_pattern (dep_t dep, bool immediately)
4734 {
4735   rtx_insn *next = DEP_CON (dep);
4736   int tick = INSN_TICK (next);
4737 
4738   /* If we already scheduled the insn, the modified version is
4739      correct.  */
4740   if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
4741     return;
4742 
4743   if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4744     {
4745       next_cycle_replace_deps.safe_push (dep);
4746       next_cycle_apply.safe_push (0);
4747       return;
4748     }
4749 
4750 
4751   if (DEP_TYPE (dep) == REG_DEP_CONTROL)
4752     {
4753       if (sched_verbose >= 5)
4754 	fprintf (sched_dump, "restoring pattern for insn %d\n",
4755 		 INSN_UID (next));
4756       haifa_change_pattern (next, ORIG_PAT (next));
4757     }
4758   else
4759     {
4760       struct dep_replacement *desc = DEP_REPLACE (dep);
4761       bool success;
4762 
4763       if (sched_verbose >= 5)
4764 	fprintf (sched_dump, "restoring pattern for insn %d\n",
4765 		 INSN_UID (desc->insn));
4766       tick = INSN_TICK (desc->insn);
4767 
4768       success = validate_change (desc->insn, desc->loc, desc->orig, 0);
4769       gcc_assert (success);
4770       update_insn_after_change (desc->insn);
4771       if (backtrack_queue != NULL)
4772 	{
4773 	  backtrack_queue->replacement_deps.safe_push (dep);
4774 	  backtrack_queue->replace_apply.safe_push (0);
4775 	}
4776     }
4777   INSN_TICK (next) = tick;
4778   if (TODO_SPEC (next) == DEP_POSTPONED)
4779     return;
4780 
4781   if (sd_lists_empty_p (next, SD_LIST_BACK))
4782     TODO_SPEC (next) = 0;
4783   else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
4784     TODO_SPEC (next) = HARD_DEP;
4785 }
4786 
4787 /* Perform pattern replacements that were queued up until the next
4788    cycle.  */
4789 static void
4790 perform_replacements_new_cycle (void)
4791 {
4792   int i;
4793   dep_t dep;
4794   FOR_EACH_VEC_ELT (next_cycle_replace_deps, i, dep)
4795     {
4796       int apply_p = next_cycle_apply[i];
4797       if (apply_p)
4798 	apply_replacement (dep, true);
4799       else
4800 	restore_pattern (dep, true);
4801     }
4802   next_cycle_replace_deps.truncate (0);
4803   next_cycle_apply.truncate (0);
4804 }
4805 
4806 /* Compute INSN_TICK_ESTIMATE for INSN.  PROCESSED is a bitmap of
4807    instructions we've previously encountered; a set bit prevents
4808    recursion.  BUDGET is a limit on how far ahead we look; it is
4809    reduced on recursive calls.  Return true if we produced a good
4810    estimate, or false if we exceeded the budget.  */
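/* For instance (hypothetical numbers): with BUDGET == 3 and a chain of
   two unscheduled producers whose dependence costs are 2 and 1, the
   first producer recurses with budget 3 - 2 == 1, the second then hits
   cost 1 >= budget, and false propagates back up.  */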
4811 static bool
4812 estimate_insn_tick (bitmap processed, rtx_insn *insn, int budget)
4813 {
4814   sd_iterator_def sd_it;
4815   dep_t dep;
4816   int earliest = INSN_TICK (insn);
4817 
4818   FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4819     {
4820       rtx_insn *pro = DEP_PRO (dep);
4821       int t;
4822 
4823       if (DEP_STATUS (dep) & DEP_CANCELLED)
4824 	continue;
4825 
4826       if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
4827 	gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
4828       else
4829 	{
4830 	  int cost = dep_cost (dep);
4831 	  if (cost >= budget)
4832 	    return false;
4833 	  if (!bitmap_bit_p (processed, INSN_LUID (pro)))
4834 	    {
4835 	      if (!estimate_insn_tick (processed, pro, budget - cost))
4836 		return false;
4837 	    }
4838 	  gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
4839 	  t = INSN_TICK_ESTIMATE (pro) + cost;
4840 	  if (earliest == INVALID_TICK || t > earliest)
4841 	    earliest = t;
4842 	}
4843     }
4844   bitmap_set_bit (processed, INSN_LUID (insn));
4845   INSN_TICK_ESTIMATE (insn) = earliest;
4846   return true;
4847 }
4848 
4849 /* Examine the pair of insns in P, and estimate (optimistically, assuming
4850    infinite resources) the cycle in which the delayed shadow can be issued.
4851    Return the number of cycles that must pass before the real insn can be
4852    issued in order to meet this constraint.  */
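/* A worked example with made-up numbers: if INSN_TICK_ESTIMATE for the
   shadow P->I2 comes out as 12, CLOCK_VAR is 6 and PAIR_DELAY (P) is 4,
   then issuing the real insn now would let the shadow issue at tick
   6 + 4 + 1 == 11, one cycle too early; we therefore return
   12 - 11 == 1 stall cycle.  */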
4853 static int
4854 estimate_shadow_tick (struct delay_pair *p)
4855 {
4856   bitmap_head processed;
4857   int t;
4858   bool cutoff;
4859   bitmap_initialize (&processed, 0);
4860 
4861   cutoff = !estimate_insn_tick (&processed, p->i2,
4862 				max_insn_queue_index + pair_delay (p));
4863   bitmap_clear (&processed);
4864   if (cutoff)
4865     return max_insn_queue_index;
4866   t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
4867   if (t > 0)
4868     return t;
4869   return 0;
4870 }
4871 
4872 /* If INSN has no unresolved backwards dependencies, add it to the schedule and
4873    recursively resolve all its forward dependencies.  */
4874 static void
4875 resolve_dependencies (rtx_insn *insn)
4876 {
4877   sd_iterator_def sd_it;
4878   dep_t dep;
4879 
4880   /* Don't use sd_lists_empty_p; it ignores debug insns.  */
4881   if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (insn)) != NULL
4882       || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (insn)) != NULL)
4883     return;
4884 
4885   if (sched_verbose >= 4)
4886     fprintf (sched_dump, ";;\tquickly resolving %d\n", INSN_UID (insn));
4887 
4888   if (QUEUE_INDEX (insn) >= 0)
4889     queue_remove (insn);
4890 
4891   scheduled_insns.safe_push (insn);
4892 
4893   /* Update dependent instructions.  */
4894   for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4895        sd_iterator_cond (&sd_it, &dep);)
4896     {
4897       rtx_insn *next = DEP_CON (dep);
4898 
4899       if (sched_verbose >= 4)
4900 	fprintf (sched_dump, ";;\t\tdep %d against %d\n", INSN_UID (insn),
4901 		 INSN_UID (next));
4902 
4903       /* Resolve the dependence between INSN and NEXT.
4904 	 sd_resolve_dep () moves current dep to another list thus
4905 	 advancing the iterator.  */
4906       sd_resolve_dep (sd_it);
4907 
4908       if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4909 	{
4910 	  resolve_dependencies (next);
4911 	}
4912       else
4913 	/* A check always has only one forward dependence (to the first insn
4914 	   in the recovery block); therefore, this will be executed only once.  */
4915 	{
4916 	  gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
4917 	}
4918     }
4919 }
4920 
4921 
4922 /* Return the head and tail pointers of ebb starting at BEG and ending
4923    at END.  */
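/* For illustration, a hypothetical single-block case (BEG == END) whose
   block contains

       code_label
       note NOTE_INSN_BASIC_BLOCK
       insn A
       insn B

   yields *HEADP == insn A and *TAILP == insn B: the label and note are
   skipped at the head, and the tail already ends in a real insn.  */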
4924 void
4925 get_ebb_head_tail (basic_block beg, basic_block end,
4926 		   rtx_insn **headp, rtx_insn **tailp)
4927 {
4928   rtx_insn *beg_head = BB_HEAD (beg);
4929   rtx_insn *beg_tail = BB_END (beg);
4930   rtx_insn *end_head = BB_HEAD (end);
4931   rtx_insn *end_tail = BB_END (end);
4932 
4933   /* Don't include any notes or labels at the beginning of the BEG
4934      basic block, or notes at the end of the END basic block.  */
4935 
4936   if (LABEL_P (beg_head))
4937     beg_head = NEXT_INSN (beg_head);
4938 
4939   while (beg_head != beg_tail)
4940     if (NOTE_P (beg_head))
4941       beg_head = NEXT_INSN (beg_head);
4942     else if (DEBUG_INSN_P (beg_head))
4943       {
4944 	rtx_insn *note, *next;
4945 
4946 	for (note = NEXT_INSN (beg_head);
4947 	     note != beg_tail;
4948 	     note = next)
4949 	  {
4950 	    next = NEXT_INSN (note);
4951 	    if (NOTE_P (note))
4952 	      {
4953 		if (sched_verbose >= 9)
4954 		  fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
4955 
4956 		reorder_insns_nobb (note, note, PREV_INSN (beg_head));
4957 
4958 		if (BLOCK_FOR_INSN (note) != beg)
4959 		  df_insn_change_bb (note, beg);
4960 	      }
4961 	    else if (!DEBUG_INSN_P (note))
4962 	      break;
4963 	  }
4964 
4965 	break;
4966       }
4967     else
4968       break;
4969 
4970   *headp = beg_head;
4971 
4972   if (beg == end)
4973     end_head = beg_head;
4974   else if (LABEL_P (end_head))
4975     end_head = NEXT_INSN (end_head);
4976 
4977   while (end_head != end_tail)
4978     if (NOTE_P (end_tail))
4979       end_tail = PREV_INSN (end_tail);
4980     else if (DEBUG_INSN_P (end_tail))
4981       {
4982 	rtx_insn *note, *prev;
4983 
4984 	for (note = PREV_INSN (end_tail);
4985 	     note != end_head;
4986 	     note = prev)
4987 	  {
4988 	    prev = PREV_INSN (note);
4989 	    if (NOTE_P (note))
4990 	      {
4991 		if (sched_verbose >= 9)
4992 		  fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
4993 
4994 		reorder_insns_nobb (note, note, end_tail);
4995 
4996 		if (end_tail == BB_END (end))
4997 		  BB_END (end) = note;
4998 
4999 		if (BLOCK_FOR_INSN (note) != end)
5000 		  df_insn_change_bb (note, end);
5001 	      }
5002 	    else if (!DEBUG_INSN_P (note))
5003 	      break;
5004 	  }
5005 
5006 	break;
5007       }
5008     else
5009       break;
5010 
5011   *tailp = end_tail;
5012 }
5013 
5014 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */
5015 
5016 int
5017 no_real_insns_p (const rtx_insn *head, const rtx_insn *tail)
5018 {
5019   while (head != NEXT_INSN (tail))
5020     {
5021       if (!NOTE_P (head) && !LABEL_P (head))
5022 	return 0;
5023       head = NEXT_INSN (head);
5024     }
5025   return 1;
5026 }
5027 
5028 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
5029    previously found among the insns.  Insert them just before HEAD.  */
5030 rtx_insn *
5031 restore_other_notes (rtx_insn *head, basic_block head_bb)
5032 {
5033   if (note_list != 0)
5034     {
5035       rtx_insn *note_head = note_list;
5036 
5037       if (head)
5038 	head_bb = BLOCK_FOR_INSN (head);
5039       else
5040 	head = NEXT_INSN (bb_note (head_bb));
5041 
5042       while (PREV_INSN (note_head))
5043 	{
5044 	  set_block_for_insn (note_head, head_bb);
5045 	  note_head = PREV_INSN (note_head);
5046 	}
5047       /* The loop above misses this first note.  */
5048       set_block_for_insn (note_head, head_bb);
5049 
5050       SET_PREV_INSN (note_head) = PREV_INSN (head);
5051       SET_NEXT_INSN (PREV_INSN (head)) = note_head;
5052       SET_PREV_INSN (head) = note_list;
5053       SET_NEXT_INSN (note_list) = head;
5054 
5055       if (BLOCK_FOR_INSN (head) != head_bb)
5056 	BB_END (head_bb) = note_list;
5057 
5058       head = note_head;
5059     }
5060 
5061   return head;
5062 }
5063 
5064 /* When we know we are going to discard the schedule due to a failed attempt
5065    at modulo scheduling, undo all replacements.  */
5066 static void
5067 undo_all_replacements (void)
5068 {
5069   rtx_insn *insn;
5070   int i;
5071 
5072   FOR_EACH_VEC_ELT (scheduled_insns, i, insn)
5073     {
5074       sd_iterator_def sd_it;
5075       dep_t dep;
5076 
5077       /* See if we must undo a replacement.  */
5078       for (sd_it = sd_iterator_start (insn, SD_LIST_RES_FORW);
5079 	   sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
5080 	{
5081 	  struct dep_replacement *desc = DEP_REPLACE (dep);
5082 	  if (desc != NULL)
5083 	    validate_change (desc->insn, desc->loc, desc->orig, 0);
5084 	}
5085     }
5086 }
5087 
5088 /* Return first non-scheduled insn in the current scheduling block.
5089    This is mostly used for debug-counter purposes.  */
5090 static rtx_insn *
5091 first_nonscheduled_insn (void)
5092 {
5093   rtx_insn *insn = (nonscheduled_insns_begin != NULL_RTX
5094 		    ? nonscheduled_insns_begin
5095 		    : current_sched_info->prev_head);
5096 
5097   do
5098     {
5099       insn = next_nonnote_nondebug_insn (insn);
5100     }
5101   while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);
5102 
5103   return insn;
5104 }
5105 
5106 /* Move insns that became ready to fire from queue to ready list.  */
5107 
5108 static void
5109 queue_to_ready (struct ready_list *ready)
5110 {
5111   rtx_insn *insn;
5112   rtx_insn_list *link;
5113   rtx_insn *skip_insn;
5114 
5115   q_ptr = NEXT_Q (q_ptr);
5116 
5117   if (dbg_cnt (sched_insn) == false)
5118     /* If the debug counter is activated, do not requeue the first
5119        nonscheduled insn.  */
5120     skip_insn = first_nonscheduled_insn ();
5121   else
5122     skip_insn = NULL;
5123 
5124   /* Add all pending insns that can be scheduled without stalls to the
5125      ready list.  */
5126   for (link = insn_queue[q_ptr]; link; link = link->next ())
5127     {
5128       insn = link->insn ();
5129       q_size -= 1;
5130 
5131       if (sched_verbose >= 2)
5132 	fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
5133 		 (*current_sched_info->print_insn) (insn, 0));
5134 
5135       /* If the ready list is full, delay the insn for 1 cycle.
5136 	 See the comment in schedule_block for the rationale.  */
5137       if (!reload_completed
5138 	  && (ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
5139 	      || (sched_pressure == SCHED_PRESSURE_MODEL
5140 		  /* Limit pressure recalculations to MAX_SCHED_READY_INSNS
5141 		     instructions too.  */
5142 		  && model_index (insn) > (model_curr_point
5143 					   + MAX_SCHED_READY_INSNS)))
5144 	  && !(sched_pressure == SCHED_PRESSURE_MODEL
5145 	       && model_curr_point < model_num_insns
5146 	       /* Always allow the next model instruction to issue.  */
5147 	       && model_index (insn) == model_curr_point)
5148 	  && !SCHED_GROUP_P (insn)
5149 	  && insn != skip_insn)
5150 	{
5151 	  if (sched_verbose >= 2)
5152 	    fprintf (sched_dump, "keeping in queue, ready full\n");
5153 	  queue_insn (insn, 1, "ready full");
5154 	}
5155       else
5156 	{
5157 	  ready_add (ready, insn, false);
5158 	  if (sched_verbose >= 2)
5159 	    fprintf (sched_dump, "moving to ready without stalls\n");
5160         }
5161     }
5162   free_INSN_LIST_list (&insn_queue[q_ptr]);
5163 
5164   /* If there are no ready insns, stall until one is ready and add all
5165      of the pending insns at that point to the ready list.  */
5166   if (ready->n_ready == 0)
5167     {
5168       int stalls;
5169 
5170       for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
5171 	{
5172 	  if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5173 	    {
5174 	      for (; link; link = link->next ())
5175 		{
5176 		  insn = link->insn ();
5177 		  q_size -= 1;
5178 
5179 		  if (sched_verbose >= 2)
5180 		    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
5181 			     (*current_sched_info->print_insn) (insn, 0));
5182 
5183 		  ready_add (ready, insn, false);
5184 		  if (sched_verbose >= 2)
5185 		    fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
5186 		}
5187 	      free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
5188 
5189 	      advance_one_cycle ();
5190 
5191 	      break;
5192 	    }
5193 
5194 	  advance_one_cycle ();
5195 	}
5196 
5197       q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
5198       clock_var += stalls;
5199       if (sched_verbose >= 2)
5200 	fprintf (sched_dump, ";;\tAdvancing clock by %d cycle[s] to %d\n",
5201 		 stalls, clock_var);
5202     }
5203 }
5204 
5205 /* Used by early_queue_to_ready.  Determines whether it is "ok" to
5206    prematurely move INSN from the queue to the ready list.  Currently,
5207    if a target defines the hook 'is_costly_dependence', this function
5208    uses the hook to check whether there exist any dependences which are
5209    considered costly by the target, between INSN and other insns that
5210    have already been scheduled.  Dependences are checked up to Y cycles
5211    back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
5212    controlling this value.
5213    (Other considerations could be taken into account, instead or in
5214    addition, depending on user flags and target hooks.)  */
5215 
5216 static bool
5217 ok_for_early_queue_removal (rtx_insn *insn)
5218 {
5219   if (targetm.sched.is_costly_dependence)
5220     {
5221       int n_cycles;
5222       int i = scheduled_insns.length ();
5223       for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
5224 	{
5225 	  while (i-- > 0)
5226 	    {
5227 	      int cost;
5228 
5229 	      rtx_insn *prev_insn = scheduled_insns[i];
5230 
5231 	      if (!NOTE_P (prev_insn))
5232 		{
5233 		  dep_t dep;
5234 
5235 		  dep = sd_find_dep_between (prev_insn, insn, true);
5236 
5237 		  if (dep != NULL)
5238 		    {
5239 		      cost = dep_cost (dep);
5240 
5241 		      if (targetm.sched.is_costly_dependence (dep, cost,
5242 				flag_sched_stalled_insns_dep - n_cycles))
5243 			return false;
5244 		    }
5245 		}
5246 
5247 	      if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
5248 		break;
5249 	    }
5250 
5251 	  if (i == 0)
5252 	    break;
5253 	}
5254     }
5255 
5256   return true;
5257 }
5258 
5259 
5260 /* Remove insns from the queue, before they become "ready" with respect
5261    to FU latency considerations.  */
5262 
5263 static int
5264 early_queue_to_ready (state_t state, struct ready_list *ready)
5265 {
5266   rtx_insn *insn;
5267   rtx_insn_list *link;
5268   rtx_insn_list *next_link;
5269   rtx_insn_list *prev_link;
5270   bool move_to_ready;
5271   int cost;
5272   state_t temp_state = alloca (dfa_state_size);
5273   int stalls;
5274   int insns_removed = 0;
5275 
5276   /*
5277      Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
5278      function:
5279 
5280      X == 0: There is no limit on how many queued insns can be removed
5281              prematurely.  (flag_sched_stalled_insns = -1).
5282 
5283      X >= 1: Only X queued insns can be removed prematurely in each
5284 	     invocation.  (flag_sched_stalled_insns = X).
5285 
5286      Otherwise: Early queue removal is disabled.
5287          (flag_sched_stalled_insns = 0)
5288   */
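  /* For example (a hypothetical invocation): with
     -fsched-stalled-insns=1 -fsched-stalled-insns-dep=2, each call of
     this function moves at most one queued insn to the ready list,
     after checking its dependences against insns scheduled up to two
     cycles back.  */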
5289 
5290   if (! flag_sched_stalled_insns)
5291     return 0;
5292 
5293   for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
5294     {
5295       if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5296 	{
5297 	  if (sched_verbose > 6)
5298 	    fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
5299 
5300 	  prev_link = 0;
5301 	  while (link)
5302 	    {
5303 	      next_link = link->next ();
5304 	      insn = link->insn ();
5305 	      if (insn && sched_verbose > 6)
5306 		print_rtl_single (sched_dump, insn);
5307 
5308 	      memcpy (temp_state, state, dfa_state_size);
5309 	      if (recog_memoized (insn) < 0)
5310 		/* Use a non-negative cost to indicate that the insn isn't
5311 		   ready, avoiding an infinite Q->R->Q->R... cycle.  */
5312 		cost = 0;
5313 	      else
5314 		cost = state_transition (temp_state, insn);
5315 
5316 	      if (sched_verbose >= 6)
5317 		fprintf (sched_dump, "transition cost = %d\n", cost);
5318 
5319 	      move_to_ready = false;
5320 	      if (cost < 0)
5321 		{
5322 		  move_to_ready = ok_for_early_queue_removal (insn);
5323 		  if (move_to_ready == true)
5324 		    {
5325 		      /* move from Q to R */
5326 		      q_size -= 1;
5327 		      ready_add (ready, insn, false);
5328 
5329 		      if (prev_link)
5330 			XEXP (prev_link, 1) = next_link;
5331 		      else
5332 			insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
5333 
5334 		      free_INSN_LIST_node (link);
5335 
5336 		      if (sched_verbose >= 2)
5337 			fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
5338 				 (*current_sched_info->print_insn) (insn, 0));
5339 
5340 		      insns_removed++;
5341 		      if (insns_removed == flag_sched_stalled_insns)
5342 			/* Remove no more than flag_sched_stalled_insns insns
5343 			   from Q at a time.  */
5344 			return insns_removed;
5345 		    }
5346 		}
5347 
5348 	      if (move_to_ready == false)
5349 		prev_link = link;
5350 
5351 	      link = next_link;
5352 	    } /* while link */
5353 	} /* if link */
5354 
5355     } /* for stalls.. */
5356 
5357   return insns_removed;
5358 }
5359 
5360 
5361 /* Print the ready list for debugging purposes.
5362    If READY_TRY is non-NULL then only print insns that max_issue
5363    will consider.  */
5364 static void
5365 debug_ready_list_1 (struct ready_list *ready, signed char *ready_try)
5366 {
5367   rtx_insn **p;
5368   int i;
5369 
5370   if (ready->n_ready == 0)
5371     {
5372       fprintf (sched_dump, "\n");
5373       return;
5374     }
5375 
5376   p = ready_lastpos (ready);
5377   for (i = 0; i < ready->n_ready; i++)
5378     {
5379       if (ready_try != NULL && ready_try[ready->n_ready - i - 1])
5380 	continue;
5381 
5382       fprintf (sched_dump, "  %s:%d",
5383 	       (*current_sched_info->print_insn) (p[i], 0),
5384 	       INSN_LUID (p[i]));
5385       if (sched_pressure != SCHED_PRESSURE_NONE)
5386 	fprintf (sched_dump, "(cost=%d",
5387 		 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
5388       fprintf (sched_dump, ":prio=%d", INSN_PRIORITY (p[i]));
5389       if (INSN_TICK (p[i]) > clock_var)
5390 	fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
5391       if (sched_pressure == SCHED_PRESSURE_MODEL)
5392 	fprintf (sched_dump, ":idx=%d",
5393 		 model_index (p[i]));
5394       if (sched_pressure != SCHED_PRESSURE_NONE)
5395 	fprintf (sched_dump, ")");
5396     }
5397   fprintf (sched_dump, "\n");
5398 }
5399 
5400 /* Print the ready list.  Callable from debugger.  */
5401 static void
5402 debug_ready_list (struct ready_list *ready)
5403 {
5404   debug_ready_list_1 (ready, NULL);
5405 }
5406 
5407 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
5408    NOTEs.  This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
5409    replaces the epilogue note in the correct basic block.  */
5410 void
5411 reemit_notes (rtx_insn *insn)
5412 {
5413   rtx note;
5414   rtx_insn *last = insn;
5415 
5416   for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
5417     {
5418       if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
5419 	{
5420 	  enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
5421 
5422 	  last = emit_note_before (note_type, last);
5423 	  remove_note (insn, note);
5424 	}
5425     }
5426 }
5427 
5428 /* Move INSN.  Reemit notes if needed.  Update CFG, if needed.  */
5429 static void
5430 move_insn (rtx_insn *insn, rtx_insn *last, rtx nt)
5431 {
5432   if (PREV_INSN (insn) != last)
5433     {
5434       basic_block bb;
5435       rtx_insn *note;
5436       int jump_p = 0;
5437 
5438       bb = BLOCK_FOR_INSN (insn);
5439 
5440       /* BB_HEAD is either LABEL or NOTE.  */
5441       gcc_assert (BB_HEAD (bb) != insn);
5442 
5443       if (BB_END (bb) == insn)
5444 	/* If this is last instruction in BB, move end marker one
5445 	   instruction up.  */
5446 	{
5447 	  /* Jumps are always placed at the end of basic block.  */
5448 	  jump_p = control_flow_insn_p (insn);
5449 
5450 	  gcc_assert (!jump_p
5451 		      || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
5452 			  && IS_SPECULATION_BRANCHY_CHECK_P (insn))
5453 		      || (common_sched_info->sched_pass_id
5454 			  == SCHED_EBB_PASS));
5455 
5456 	  gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
5457 
5458 	  BB_END (bb) = PREV_INSN (insn);
5459 	}
5460 
5461       gcc_assert (BB_END (bb) != last);
5462 
5463       if (jump_p)
5464 	/* We move the block note along with jump.  */
5465 	{
5466 	  gcc_assert (nt);
5467 
5468 	  note = NEXT_INSN (insn);
5469 	  while (NOTE_NOT_BB_P (note) && note != nt)
5470 	    note = NEXT_INSN (note);
5471 
5472 	  if (note != nt
5473 	      && (LABEL_P (note)
5474 		  || BARRIER_P (note)))
5475 	    note = NEXT_INSN (note);
5476 
5477 	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5478 	}
5479       else
5480 	note = insn;
5481 
5482       SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
5483       SET_PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
5484 
5485       SET_NEXT_INSN (note) = NEXT_INSN (last);
5486       SET_PREV_INSN (NEXT_INSN (last)) = note;
5487 
5488       SET_NEXT_INSN (last) = insn;
5489       SET_PREV_INSN (insn) = last;
5490 
5491       bb = BLOCK_FOR_INSN (last);
5492 
5493       if (jump_p)
5494 	{
5495 	  fix_jump_move (insn);
5496 
5497 	  if (BLOCK_FOR_INSN (insn) != bb)
5498 	    move_block_after_check (insn);
5499 
5500 	  gcc_assert (BB_END (bb) == last);
5501 	}
5502 
5503       df_insn_change_bb (insn, bb);
5504 
5505       /* Update BB_END, if needed.  */
5506       if (BB_END (bb) == last)
5507 	BB_END (bb) = insn;
5508     }
5509 
5510   SCHED_GROUP_P (insn) = 0;
5511 }
5512 
5513 /* Return true if scheduling INSN will finish current clock cycle.  */
5514 static bool
5515 insn_finishes_cycle_p (rtx_insn *insn)
5516 {
5517   if (SCHED_GROUP_P (insn))
5518     /* After issuing INSN, rest of the sched_group will be forced to issue
5519        in order.  Don't make any plans for the rest of cycle.  */
5520     return true;
5521 
5522   /* Finishing the block will, apparently, finish the cycle.  */
5523   if (current_sched_info->insn_finishes_block_p
5524       && current_sched_info->insn_finishes_block_p (insn))
5525     return true;
5526 
5527   return false;
5528 }
5529 
5530 /* Helper for autopref_multipass_init.  Given a SET in PAT and whether
5531    we're expecting a memory WRITE or not, check that the insn is relevant to
5532    the autoprefetcher modelling code.  Return true iff that is the case.
5533    If it is relevant, record the base register of the memory op in BASE and
5534    the offset in OFFSET.  */
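/* For example (hypothetical RTL), with WRITE false a load such as

       (set (reg:SI r0) (mem:SI (plus:SI (reg:SI r4) (const_int 16))))

   is accepted, storing r4 in *BASE and 16 in *OFFSET; the corresponding
   store form is only considered when WRITE is true.  */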
5535 
5536 static bool
5537 analyze_set_insn_for_autopref (rtx pat, bool write, rtx *base, int *offset)
5538 {
5539   if (GET_CODE (pat) != SET)
5540     return false;
5541 
5542   rtx mem = write ? SET_DEST (pat) : SET_SRC (pat);
5543   if (!MEM_P (mem))
5544     return false;
5545 
5546   struct address_info info;
5547   decompose_mem_address (&info, mem);
5548 
5549   /* TODO: Currently only (base+const) addressing is supported.  */
5550   if (info.base == NULL || !REG_P (*info.base)
5551       || (info.disp != NULL && !CONST_INT_P (*info.disp)))
5552     return false;
5553 
5554   *base = *info.base;
5555   *offset = info.disp ? INTVAL (*info.disp) : 0;
5556   return true;
5557 }
5558 
5559 /* Functions to model cache auto-prefetcher.
5560 
5561    Some CPUs have a cache auto-prefetcher, which /seems/ to initiate
5562    memory prefetches if it sees instructions with consecutive memory
5563    accesses in the instruction stream.  Details of such hardware units
5564    are not published, so we can only guess what exactly is going on there.
5565    In the scheduler, we model an abstract auto-prefetcher.  If there are
5566    memory insns in the ready list (or the queue) that have the same memory
5567    base, but different offsets, then we delay the insns with larger
5568    offsets until insns with smaller offsets get scheduled.  If
5569    PARAM_SCHED_AUTOPREF_QUEUE_DEPTH is "1", then we look at the ready
5570    list; if it is N>1, then we also look through N-1 queue entries.
5571    If the param is N>=0, then rank_for_schedule will consider
5572    auto-prefetching among its heuristics.
5573    Param value of "-1" disables modelling of the auto-prefetcher.  */
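
/* As a hypothetical illustration of the model: with
   PARAM_SCHED_AUTOPREF_QUEUE_DEPTH == 1 and two ready loads

       (set (reg:SI r2) (mem:SI (plus:SI (reg:SI r1) (const_int 8))))
       (set (reg:SI r3) (mem:SI (reg:SI r1)))

   both accesses share base r1, so the offset-8 load is delayed until the
   offset-0 load is scheduled, presenting the hardware prefetcher with an
   ascending stream of accesses.  */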
5574 
5575 /* Initialize autoprefetcher model data for INSN.  */
5576 static void
5577 autopref_multipass_init (const rtx_insn *insn, int write)
5578 {
5579   autopref_multipass_data_t data = &INSN_AUTOPREF_MULTIPASS_DATA (insn)[write];
5580 
5581   gcc_assert (data->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED);
5582   data->base = NULL_RTX;
5583   data->min_offset = 0;
5584   data->max_offset = 0;
5585   data->multi_mem_insn_p = false;
5586   /* Set insn entry initialized, but not relevant for auto-prefetcher.  */
5587   data->status = AUTOPREF_MULTIPASS_DATA_IRRELEVANT;
5588 
5589   rtx pat = PATTERN (insn);
5590 
5591   /* We have a multi-set insn like a load-multiple or store-multiple.
5592      We care about these as long as all the memory ops inside the PARALLEL
5593      have the same base register.  We care about the minimum and maximum
5594      offsets from that base but don't check for the order of those offsets
5595      within the PARALLEL insn itself.  */
5596   if (GET_CODE (pat) == PARALLEL)
5597     {
5598       int n_elems = XVECLEN (pat, 0);
5599 
5600       int i = 0;
5601       rtx prev_base = NULL_RTX;
5602       int min_offset = 0;
5603       int max_offset = 0;
5604 
5605       for (i = 0; i < n_elems; i++)
5606 	{
5607 	  rtx set = XVECEXP (pat, 0, i);
5608 	  if (GET_CODE (set) != SET)
5609 	    return;
5610 
5611 	  rtx base = NULL_RTX;
5612 	  int offset = 0;
5613 	  if (!analyze_set_insn_for_autopref (set, write, &base, &offset))
5614 	    return;
5615 
5616 	  if (i == 0)
5617 	    {
5618 	      prev_base = base;
5619 	      min_offset = offset;
5620 	      max_offset = offset;
5621 	    }
5622 	  /* Ensure that all memory operations in the PARALLEL use the same
5623 	     base register.  */
5624 	  else if (REGNO (base) != REGNO (prev_base))
5625 	    return;
5626 	  else
5627 	    {
5628 	      min_offset = MIN (min_offset, offset);
5629 	      max_offset = MAX (max_offset, offset);
5630 	    }
5631 	}
5632 
5633       /* If we reached here then we have a valid PARALLEL of multiple memory
5634 	 ops with prev_base as the base and min_offset and max_offset
5635 	 containing the offsets range.  */
5636       gcc_assert (prev_base);
5637       data->base = prev_base;
5638       data->min_offset = min_offset;
5639       data->max_offset = max_offset;
5640       data->multi_mem_insn_p = true;
5641       data->status = AUTOPREF_MULTIPASS_DATA_NORMAL;
5642 
5643       return;
5644     }
5645 
5646   /* Otherwise this is a single set memory operation.  */
5647   rtx set = single_set (insn);
5648   if (set == NULL_RTX)
5649     return;
5650 
5651   if (!analyze_set_insn_for_autopref (set, write, &data->base,
5652 				       &data->min_offset))
5653     return;
5654 
5655   /* This insn is relevant for the auto-prefetcher.
5656      The base and offset fields will have been filled in the
5657      analyze_set_insn_for_autopref call above.  */
5658   data->status = AUTOPREF_MULTIPASS_DATA_NORMAL;
5659 }
5660 
5661 
5662 /* Helper for autopref_rank_for_schedule.  Given the data of two
5663    insns relevant to the auto-prefetcher modelling code DATA1 and DATA2
5664    return their comparison result.  Return 0 if there is no sensible
5665    ranking order for the two insns.  */
5666 
5667 static int
5668 autopref_rank_data (autopref_multipass_data_t data1,
5669 		     autopref_multipass_data_t data2)
5670 {
5671   /* Simple case when both insns are simple single memory ops.  */
5672   if (!data1->multi_mem_insn_p && !data2->multi_mem_insn_p)
5673     return data1->min_offset - data2->min_offset;
5674 
5675   /* Two load/store multiple insns.  Return 0 if the offset ranges
5676      overlap and the difference between the minimum offsets otherwise.  */
5677   else if (data1->multi_mem_insn_p && data2->multi_mem_insn_p)
5678     {
5679       int min1 = data1->min_offset;
5680       int max1 = data1->max_offset;
5681       int min2 = data2->min_offset;
5682       int max2 = data2->max_offset;
5683 
5684       if (max1 < min2 || min1 > max2)
5685 	return min1 - min2;
5686       else
5687 	return 0;
5688     }
5689 
5690   /* The other two cases are a pair of a load/store multiple and
5691      a simple memory op.  Return 0 if the single op's offset is within the
5692      range of the multi-op insn and the difference between the single offset
5693      and the minimum offset of the multi-set insn otherwise.  */
5694   else if (data1->multi_mem_insn_p && !data2->multi_mem_insn_p)
5695     {
5696       int max1 = data1->max_offset;
5697       int min1 = data1->min_offset;
5698 
5699       if (data2->min_offset >= min1
5700 	  && data2->min_offset <= max1)
5701 	return 0;
5702       else
5703 	return min1 - data2->min_offset;
5704     }
5705   else
5706     {
5707       int max2 = data2->max_offset;
5708       int min2 = data2->min_offset;
5709 
5710       if (data1->min_offset >= min2
5711 	  && data1->min_offset <= max2)
5712 	return 0;
5713       else
5714 	return data1->min_offset - min2;
5715     }
5716 }
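
/* Two hypothetical examples of the ranking above: a multi-mem insn
   covering offsets [0, 12] compares equal (0) with a single load at
   offset 8, which falls inside that range, but ranks before a single
   load at offset 16 (0 - 16 < 0).  */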
5717 
5718 /* Helper function for rank_for_schedule sorting.  */
5719 static int
5720 autopref_rank_for_schedule (const rtx_insn *insn1, const rtx_insn *insn2)
5721 {
5722   for (int write = 0; write < 2; ++write)
5723     {
5724       autopref_multipass_data_t data1
5725 	= &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5726       autopref_multipass_data_t data2
5727 	= &INSN_AUTOPREF_MULTIPASS_DATA (insn2)[write];
5728 
5729       if (data1->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5730 	autopref_multipass_init (insn1, write);
5731       if (data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5732 	continue;
5733 
5734       if (data2->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5735 	autopref_multipass_init (insn2, write);
5736       if (data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5737 	continue;
5738 
5739       if (!rtx_equal_p (data1->base, data2->base))
5740 	continue;
5741 
5742       return autopref_rank_data (data1, data2);
5743     }
5744 
5745   return 0;
5746 }
5747 
5748 /* True if header of debug dump was printed.  */
5749 static bool autopref_multipass_dfa_lookahead_guard_started_dump_p;
5750 
5751 /* Helper for autopref_multipass_dfa_lookahead_guard.
5752    Return "1" if INSN1 should be delayed in favor of INSN2.  */
5753 static int
5754 autopref_multipass_dfa_lookahead_guard_1 (const rtx_insn *insn1,
5755 					  const rtx_insn *insn2, int write)
5756 {
5757   autopref_multipass_data_t data1
5758     = &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5759   autopref_multipass_data_t data2
5760     = &INSN_AUTOPREF_MULTIPASS_DATA (insn2)[write];
5761 
5762   if (data2->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5763     autopref_multipass_init (insn2, write);
5764   if (data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5765     return 0;
5766 
5767   if (rtx_equal_p (data1->base, data2->base)
5768       && autopref_rank_data (data1, data2) > 0)
5769     {
5770       if (sched_verbose >= 2)
5771 	{
5772           if (!autopref_multipass_dfa_lookahead_guard_started_dump_p)
5773 	    {
5774 	      fprintf (sched_dump,
5775 		       ";;\t\tnot trying in max_issue due to autoprefetch "
5776 		       "model: ");
5777 	      autopref_multipass_dfa_lookahead_guard_started_dump_p = true;
5778 	    }
5779 
5780 	  fprintf (sched_dump, " %d(%d)", INSN_UID (insn1), INSN_UID (insn2));
5781 	}
5782 
5783       return 1;
5784     }
5785 
5786   return 0;
5787 }
5788 
5789 /* General note:
5790 
5791    We could have also hooked autoprefetcher model into
5792    first_cycle_multipass_backtrack / first_cycle_multipass_issue hooks
5793    to enable intelligent selection of "[r1+0]=r2; [r1+4]=r3" on the same cycle
5794    (e.g., once "[r1+0]=r2" is issued in max_issue(), "[r1+4]=r3" gets
5795    unblocked).  We don't bother about this yet because the target of
5796    interest (ARM Cortex-A15) can issue only one memory operation per cycle.  */
5797 
5798 /* Implementation of first_cycle_multipass_dfa_lookahead_guard hook.
5799    Return "1" if INSN1 should not be considered in max_issue due to
5800    auto-prefetcher considerations.  */
5801 int
5802 autopref_multipass_dfa_lookahead_guard (rtx_insn *insn1, int ready_index)
5803 {
5804   int r = 0;
5805 
5806   /* Exit early if the param forbids this or if we're not entering here through
5807      normal haifa scheduling.  This can happen if selective scheduling is
5808      explicitly enabled.  */
5809   if (!insn_queue || PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) <= 0)
5810     return 0;
5811 
5812   if (sched_verbose >= 2 && ready_index == 0)
5813     autopref_multipass_dfa_lookahead_guard_started_dump_p = false;
5814 
5815   for (int write = 0; write < 2; ++write)
5816     {
5817       autopref_multipass_data_t data1
5818 	= &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5819 
5820       if (data1->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5821 	autopref_multipass_init (insn1, write);
5822       if (data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5823 	continue;
5824 
5825       if (ready_index == 0
5826 	  && data1->status == AUTOPREF_MULTIPASS_DATA_DONT_DELAY)
5827 	/* We allow only a single delay on privileged instructions.
5828 	   Doing otherwise would cause an infinite loop.  */
5829 	{
5830 	  if (sched_verbose >= 2)
5831 	    {
5832 	      if (!autopref_multipass_dfa_lookahead_guard_started_dump_p)
5833 		{
5834 		  fprintf (sched_dump,
5835 			   ";;\t\tnot trying in max_issue due to autoprefetch "
5836 			   "model: ");
5837 		  autopref_multipass_dfa_lookahead_guard_started_dump_p = true;
5838 		}
5839 
5840 	      fprintf (sched_dump, " *%d*", INSN_UID (insn1));
5841 	    }
5842 	  continue;
5843 	}
5844 
5845       for (int i2 = 0; i2 < ready.n_ready; ++i2)
5846 	{
5847 	  rtx_insn *insn2 = get_ready_element (i2);
5848 	  if (insn1 == insn2)
5849 	    continue;
5850 	  r = autopref_multipass_dfa_lookahead_guard_1 (insn1, insn2, write);
5851 	  if (r)
5852 	    {
5853 	      if (ready_index == 0)
5854 		{
5855 		  r = -1;
5856 		  data1->status = AUTOPREF_MULTIPASS_DATA_DONT_DELAY;
5857 		}
5858 	      goto finish;
5859 	    }
5860 	}
5861 
5862       if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) == 1)
5863 	continue;
5864 
5865       /* Everything from the current queue slot should have been moved to
5866 	 the ready list.  */
5867       gcc_assert (insn_queue[NEXT_Q_AFTER (q_ptr, 0)] == NULL_RTX);
5868 
5869       int n_stalls = PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) - 1;
5870       if (n_stalls > max_insn_queue_index)
5871 	n_stalls = max_insn_queue_index;
5872 
5873       for (int stalls = 1; stalls <= n_stalls; ++stalls)
5874 	{
5875 	  for (rtx_insn_list *link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)];
5876 	       link != NULL_RTX;
5877 	       link = link->next ())
5878 	    {
5879 	      rtx_insn *insn2 = link->insn ();
5880 	      r = autopref_multipass_dfa_lookahead_guard_1 (insn1, insn2,
5881 							    write);
5882 	      if (r)
5883 		{
5884 		  /* Queue INSN1 until INSN2 can issue.  */
5885 		  r = -stalls;
5886 		  if (ready_index == 0)
5887 		    data1->status = AUTOPREF_MULTIPASS_DATA_DONT_DELAY;
5888 		  goto finish;
5889 		}
5890 	    }
5891 	}
5892     }
5893 
5894     finish:
5895   if (sched_verbose >= 2
5896       && autopref_multipass_dfa_lookahead_guard_started_dump_p
5897       && (ready_index == ready.n_ready - 1 || r < 0))
5898     /* This does not /always/ trigger.  We don't output EOL if the last
5899        insn is not recognized (INSN_CODE < 0) and lookahead_guard is not
5900        called.  We can live with this.  */
5901     fprintf (sched_dump, "\n");
5902 
5903   return r;
5904 }
5905 
5906 /* Define type for target data used in multipass scheduling.  */
5907 #ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
5908 # define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
5909 #endif
5910 typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;
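
/* A back end that wants richer per-choice bookkeeping can override the
   default in its target header (a hypothetical example):

       #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T \
	 struct my_port_multipass_data

   One such object is then kept in every choice_entry below.  */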
5911 
5912 /* The following structure describes an entry of the stack of choices.  */
5913 struct choice_entry
5914 {
5915   /* Ordinal number of the issued insn in the ready queue.  */
5916   int index;
5917   /* The number of remaining insns whose issue we should try.  */
5918   int rest;
5919   /* The number of issued essential insns.  */
5920   int n;
5921   /* State after issuing the insn.  */
5922   state_t state;
5923   /* Target-specific data.  */
5924   first_cycle_multipass_data_t target_data;
5925 };
5926 
5927 /* The following array is used to implement a stack of choices used in
5928    function max_issue.  */
5929 static struct choice_entry *choice_stack;
5930 
5931 /* This holds the value of the target dfa_lookahead hook.  */
5932 int dfa_lookahead;
5933 
5934 /* The following variable holds the maximal number of tries of issuing
5935    insns for the first cycle multipass insn scheduling.  We define
5936    this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
5937    need this constraint if all real insns (with non-negative codes)
5938    had reservations because in this case the algorithm complexity is
5939    O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
5940    might be incomplete and such insn might occur.  For such
5941    descriptions, the complexity of algorithm (without the constraint)
5942    could achieve DFA_LOOKAHEAD ** N , where N is the queue length.  */
5943 static int max_lookahead_tries;
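
/* For example (hypothetical values): with dfa_lookahead == 4 and
   issue_rate == 2, the initialization in max_issue below computes
   max_lookahead_tries = 100 * 4 * 4 == 1600.  */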
5944 
5945 /* The following function returns the maximal (or close to maximal)
5946    number of insns which can be issued on the same cycle, one of which
5947    is the insn with the best rank (the first insn in READY).  To do
5948    this, the function tries different samples of ready insns.  READY
5949    is the current queue `ready'.  The global array READY_TRY reflects
5950    which insns have already been issued in this try.  The function
5951    stops immediately once it finds a solution in which all insns can
5952    be issued.  INDEX will contain the index of the best insn in READY.
5953    This function is used only for first cycle multipass scheduling.
5954 
5955    PRIVILEGED_N >= 0; a nonzero value means a solution must issue at least one of the first PRIVILEGED_N insns in READY.
5956 
5957    This function expects recognized insns only.  All USEs,
5958    CLOBBERs, etc. must be filtered elsewhere.  */
5959 int
5960 max_issue (struct ready_list *ready, int privileged_n, state_t state,
5961 	   bool first_cycle_insn_p, int *index)
5962 {
5963   int n, i, all, n_ready, best, delay, tries_num;
5964   int more_issue;
5965   struct choice_entry *top;
5966   rtx_insn *insn;
5967 
5968   if (sched_fusion)
5969     return 0;
5970 
5971   n_ready = ready->n_ready;
5972   gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
5973 	      && privileged_n <= n_ready);
5974 
5975   /* Init MAX_LOOKAHEAD_TRIES.  */
5976   if (max_lookahead_tries == 0)
5977     {
5978       max_lookahead_tries = 100;
5979       for (i = 0; i < issue_rate; i++)
5980 	max_lookahead_tries *= dfa_lookahead;
5981     }
5982 
5983   /* Init max_points.  */
5984   more_issue = issue_rate - cycle_issued_insns;
5985   gcc_assert (more_issue >= 0);
5986 
5987   /* The number of the issued insns in the best solution.  */
5988   best = 0;
5989 
5990   top = choice_stack;
5991 
5992   /* Set initial state of the search.  */
5993   memcpy (top->state, state, dfa_state_size);
5994   top->rest = dfa_lookahead;
5995   top->n = 0;
5996   if (targetm.sched.first_cycle_multipass_begin)
5997     targetm.sched.first_cycle_multipass_begin (&top->target_data,
5998 					       ready_try, n_ready,
5999 					       first_cycle_insn_p);
6000 
6001   /* Count the number of the insns to search among.  */
6002   for (all = i = 0; i < n_ready; i++)
6003     if (!ready_try [i])
6004       all++;
6005 
6006   if (sched_verbose >= 2)
6007     {
6008       fprintf (sched_dump, ";;\t\tmax_issue among %d insns:", all);
6009       debug_ready_list_1 (ready, ready_try);
6010     }
6011 
6012   /* I is the index of the insn to try next.  */
6013   i = 0;
6014   tries_num = 0;
6015   for (;;)
6016     {
6017       if (/* If we've reached a dead end or searched enough of what we have
6018 	     been asked...  */
6019 	  top->rest == 0
6020 	  /* or have nothing else to try...  */
6021 	  || i >= n_ready
6022 	  /* or should not issue more.  */
6023 	  || top->n >= more_issue)
6024 	{
6025 	  /* ??? (... || i == n_ready).  */
6026 	  gcc_assert (i <= n_ready);
6027 
6028 	  /* We should not issue more than issue_rate instructions.  */
6029 	  gcc_assert (top->n <= more_issue);
6030 
6031 	  if (top == choice_stack)
6032 	    break;
6033 
6034 	  if (best < top - choice_stack)
6035 	    {
6036 	      if (privileged_n)
6037 		{
6038 		  n = privileged_n;
6039 		  /* Try to find issued privileged insn.  */
6040 		  while (n && !ready_try[--n])
6041 		    ;
6042 		}
6043 
6044 	      if (/* If all insns are equally good...  */
6045 		  privileged_n == 0
6046 		  /* Or a privileged insn will be issued.  */
6047 		  || ready_try[n])
6048 		/* Then we have a solution.  */
6049 		{
6050 		  best = top - choice_stack;
6051 		  /* This is the index of the insn issued first in this
6052 		     solution.  */
6053 		  *index = choice_stack [1].index;
6054 		  if (top->n == more_issue || best == all)
6055 		    break;
6056 		}
6057 	    }
6058 
6059 	  /* Set ready-list index to point to the last insn
6060 	     ('i++' below will advance it to the next insn).  */
6061 	  i = top->index;
6062 
6063 	  /* Backtrack.  */
6064 	  ready_try [i] = 0;
6065 
6066 	  if (targetm.sched.first_cycle_multipass_backtrack)
6067 	    targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
6068 							   ready_try, n_ready);
6069 
6070 	  top--;
6071 	  memcpy (state, top->state, dfa_state_size);
6072 	}
6073       else if (!ready_try [i])
6074 	{
6075 	  tries_num++;
6076 	  if (tries_num > max_lookahead_tries)
6077 	    break;
6078 	  insn = ready_element (ready, i);
6079 	  delay = state_transition (state, insn);
6080 	  if (delay < 0)
6081 	    {
6082 	      if (state_dead_lock_p (state)
6083 		  || insn_finishes_cycle_p (insn))
6084 		/* We won't issue any more instructions in the next
6085 		   choice_state.  */
6086 		top->rest = 0;
6087 	      else
6088 		top->rest--;
6089 
6090 	      n = top->n;
6091 	      if (memcmp (top->state, state, dfa_state_size) != 0)
6092 		n++;
6093 
6094 	      /* Advance to the next choice_entry.  */
6095 	      top++;
6096 	      /* Initialize it.  */
6097 	      top->rest = dfa_lookahead;
6098 	      top->index = i;
6099 	      top->n = n;
6100 	      memcpy (top->state, state, dfa_state_size);
6101 	      ready_try [i] = 1;
6102 
6103 	      if (targetm.sched.first_cycle_multipass_issue)
6104 		targetm.sched.first_cycle_multipass_issue (&top->target_data,
6105 							   ready_try, n_ready,
6106 							   insn,
6107 							   &((top - 1)
6108 							     ->target_data));
6109 
6110 	      i = -1;
6111 	    }
6112 	}
6113 
6114       /* Increase ready-list index.  */
6115       i++;
6116     }
6117 
6118   if (targetm.sched.first_cycle_multipass_end)
6119     targetm.sched.first_cycle_multipass_end (best != 0
6120 					     ? &choice_stack[1].target_data
6121 					     : NULL);
6122 
6123   /* Restore the original state of the DFA.  */
6124   memcpy (state, choice_stack->state, dfa_state_size);
6125 
6126   return best;
6127 }
6128 
6129 /* The following function chooses an insn from READY and modifies
6130    READY.  It is used only for first cycle multipass
6131    scheduling.
6132    Return:
6133    -1 if cycle should be advanced,
6134    0 if INSN_PTR is set to point to the desirable insn,
6135    1 if choose_ready () should be restarted without advancing the cycle.  */
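
/* A minimal sketch (not the actual schedule_block code) of how the
   return protocol above is meant to be consumed:

       int res;
       do
	 res = choose_ready (&ready, first_cycle_insn_p, &insn);
       while (res == 1);
       if (res < 0)
	 advance_one_cycle ();
       else
	 gcc_assert (insn != NULL);

   1 restarts the choice after a ready-list change, -1 asks the caller
   to advance the cycle, and 0 leaves the chosen insn in INSN.  */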
6136 static int
6137 choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
6138 	      rtx_insn **insn_ptr)
6139 {
6140   if (dbg_cnt (sched_insn) == false)
6141     {
6142       if (nonscheduled_insns_begin == NULL_RTX)
6143 	nonscheduled_insns_begin = current_sched_info->prev_head;
6144 
6145       rtx_insn *insn = first_nonscheduled_insn ();
6146 
6147       if (QUEUE_INDEX (insn) == QUEUE_READY)
6148 	/* INSN is in the ready_list.  */
6149 	{
6150 	  ready_remove_insn (insn);
6151 	  *insn_ptr = insn;
6152 	  return 0;
6153 	}
6154 
6155       /* INSN is in the queue.  Advance cycle to move it to the ready list.  */
6156       gcc_assert (QUEUE_INDEX (insn) >= 0);
6157       return -1;
6158     }
6159 
6160   if (dfa_lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
6161       || DEBUG_INSN_P (ready_element (ready, 0)))
6162     {
6163       if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
6164 	*insn_ptr = ready_remove_first_dispatch (ready);
6165       else
6166 	*insn_ptr = ready_remove_first (ready);
6167 
6168       return 0;
6169     }
6170   else
6171     {
6172       /* Try to choose the best insn.  */
6173       int index = 0, i;
6174       rtx_insn *insn;
6175 
6176       insn = ready_element (ready, 0);
6177       if (INSN_CODE (insn) < 0)
6178 	{
6179 	  *insn_ptr = ready_remove_first (ready);
6180 	  return 0;
6181 	}
6182 
6183       /* Filter the search space.  */
6184       for (i = 0; i < ready->n_ready; i++)
6185 	{
6186 	  ready_try[i] = 0;
6187 
6188 	  insn = ready_element (ready, i);
6189 
6190 	  /* If this insn is recognizable we should have already
6191 	     recognized it earlier.
6192 	     ??? Not very clear where this is supposed to be done.
6193 	     See dep_cost_1.  */
6194 	  gcc_checking_assert (INSN_CODE (insn) >= 0
6195 			       || recog_memoized (insn) < 0);
6196 	  if (INSN_CODE (insn) < 0)
6197 	    {
6198 	      /* Non-recognized insns at position 0 are handled above.  */
6199 	      gcc_assert (i > 0);
6200 	      ready_try[i] = 1;
6201 	      continue;
6202 	    }
6203 
6204 	  if (targetm.sched.first_cycle_multipass_dfa_lookahead_guard)
6205 	    {
6206 	      ready_try[i]
6207 		= (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
6208 		    (insn, i));
6209 
6210 	      if (ready_try[i] < 0)
6211 		/* Queue instruction for several cycles.
6212 		   We need to restart choose_ready as we have changed
6213 		   the ready list.  */
6214 		{
6215 		  change_queue_index (insn, -ready_try[i]);
6216 		  return 1;
6217 		}
6218 
6219 	      /* Make sure that we didn't end up with 0'th insn filtered out.
6220 		 Don't be tempted to make life easier for backends and just
6221 		 requeue 0'th insn if (ready_try[0] == 0) and restart
6222 		 choose_ready.  Backends should be very considerate about
6223 		 requeueing instructions -- especially the highest priority
6224 		 one at position 0.  */
6225 	      gcc_assert (ready_try[i] == 0 || i > 0);
6226 	      if (ready_try[i])
6227 		continue;
6228 	    }
6229 
6230 	  gcc_assert (ready_try[i] == 0);
6231 	  /* INSN made it through the scrutiny of filters!  */
6232 	}
6233 
6234       if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
6235 	{
6236 	  *insn_ptr = ready_remove_first (ready);
6237 	  if (sched_verbose >= 4)
6238 	    fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
6239                      (*current_sched_info->print_insn) (*insn_ptr, 0));
6240 	  return 0;
6241 	}
6242       else
6243 	{
6244 	  if (sched_verbose >= 4)
6245 	    fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
6246 		     (*current_sched_info->print_insn)
6247 		     (ready_element (ready, index), 0));
6248 
6249 	  *insn_ptr = ready_remove (ready, index);
6250 	  return 0;
6251 	}
6252     }
6253 }
6254 
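/* Editor's note: an illustrative sketch, not part of the original source,
   of how choose_ready's tri-state result is consumed by its caller in
   schedule_block further below; the names mirror the real code, but the
   control flow is simplified:

     res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);
     if (res < 0)
       break;                      // finish the current cycle
     if (res > 0)
       goto restart_choose_ready;  // ready list changed; choose again
     // res == 0: INSN was removed from the ready list and can issue.  */
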
6255 /* This function is called when we have successfully scheduled a
6256    block.  It uses the schedule stored in the scheduled_insns vector
6257    to rearrange the RTL.  PREV_HEAD is used as the anchor to which we
6258    append the scheduled insns; TAIL is the insn after the scheduled
6259    block.  TARGET_BB is the argument passed to schedule_block.  */
6260 
6261 static void
6262 commit_schedule (rtx_insn *prev_head, rtx_insn *tail, basic_block *target_bb)
6263 {
6264   unsigned int i;
6265   rtx_insn *insn;
6266 
6267   last_scheduled_insn = prev_head;
6268   for (i = 0;
6269        scheduled_insns.iterate (i, &insn);
6270        i++)
6271     {
6272       if (control_flow_insn_p (last_scheduled_insn)
6273 	  || current_sched_info->advance_target_bb (*target_bb, insn))
6274 	{
6275 	  *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);
6276 
6277 	  if (sched_verbose)
6278 	    {
6279 	      rtx_insn *x;
6280 
6281 	      x = next_real_insn (last_scheduled_insn);
6282 	      gcc_assert (x);
6283 	      dump_new_block_header (1, *target_bb, x, tail);
6284 	    }
6285 
6286 	  last_scheduled_insn = bb_note (*target_bb);
6287 	}
6288 
6289       if (current_sched_info->begin_move_insn)
6290 	(*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
6291       move_insn (insn, last_scheduled_insn,
6292 		 current_sched_info->next_tail);
6293       if (!DEBUG_INSN_P (insn))
6294 	reemit_notes (insn);
6295       last_scheduled_insn = insn;
6296     }
6297 
6298   scheduled_insns.truncate (0);
6299 }
6300 
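/* Editor's note: a hedged example of commit_schedule's effect, with
   invented insns.  If the block originally read PREV_HEAD, i1, i2, i3,
   TAIL and the scheduler filled scheduled_insns with {i3, i1, i2}, the
   RTL chain afterwards reads PREV_HEAD, i3, i1, i2, TAIL, with notes
   re-emitted after each non-debug insn and the target block advanced
   whenever a control flow insn or a block boundary is crossed.  */
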
6301 /* Examine all insns on the ready list and queue those which can't be
6302    issued in this cycle.  TEMP_STATE is temporary scheduler state we
6303    can use as scratch space.  If FIRST_CYCLE_INSN_P is true, no insns
6304    have been issued for the current cycle, which means it is valid to
6305    issue an asm statement.
6306 
6307    If SHADOWS_ONLY_P is true, we eliminate all real insns and only
6308    leave those for which SHADOW_P is true.  If MODULO_EPILOGUE is true,
6309    we only leave insns which have an INSN_EXACT_TICK.  */
6310 
6311 static void
6312 prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
6313 		  bool shadows_only_p, bool modulo_epilogue_p)
6314 {
6315   int i, pass;
6316   bool sched_group_found = false;
6317   int min_cost_group = 1;
6318 
6319   if (sched_fusion)
6320     return;
6321 
6322   for (i = 0; i < ready.n_ready; i++)
6323     {
6324       rtx_insn *insn = ready_element (&ready, i);
6325       if (SCHED_GROUP_P (insn))
6326 	{
6327 	  sched_group_found = true;
6328 	  break;
6329 	}
6330     }
6331 
6332   /* Make two passes if there's a SCHED_GROUP_P insn; make sure to handle
6333      such an insn first and note its cost, then schedule all other insns
6334      for one cycle later.  */
6335   for (pass = sched_group_found ? 0 : 1; pass < 2; )
6336     {
6337       int n = ready.n_ready;
6338       for (i = 0; i < n; i++)
6339 	{
6340 	  rtx_insn *insn = ready_element (&ready, i);
6341 	  int cost = 0;
6342 	  const char *reason = "resource conflict";
6343 
6344 	  if (DEBUG_INSN_P (insn))
6345 	    continue;
6346 
6347 	  if (sched_group_found && !SCHED_GROUP_P (insn))
6348 	    {
6349 	      if (pass == 0)
6350 		continue;
6351 	      cost = min_cost_group;
6352 	      reason = "not in sched group";
6353 	    }
6354 	  else if (modulo_epilogue_p
6355 		   && INSN_EXACT_TICK (insn) == INVALID_TICK)
6356 	    {
6357 	      cost = max_insn_queue_index;
6358 	      reason = "not an epilogue insn";
6359 	    }
6360 	  else if (shadows_only_p && !SHADOW_P (insn))
6361 	    {
6362 	      cost = 1;
6363 	      reason = "not a shadow";
6364 	    }
6365 	  else if (recog_memoized (insn) < 0)
6366 	    {
6367 	      if (!first_cycle_insn_p
6368 		  && (GET_CODE (PATTERN (insn)) == ASM_INPUT
6369 		      || asm_noperands (PATTERN (insn)) >= 0))
6370 		cost = 1;
6371 	      reason = "asm";
6372 	    }
6373 	  else if (sched_pressure != SCHED_PRESSURE_NONE)
6374 	    {
6375 	      if (sched_pressure == SCHED_PRESSURE_MODEL
6376 		  && INSN_TICK (insn) <= clock_var)
6377 		{
6378 		  memcpy (temp_state, curr_state, dfa_state_size);
6379 		  if (state_transition (temp_state, insn) >= 0)
6380 		    INSN_TICK (insn) = clock_var + 1;
6381 		}
6382 	      cost = 0;
6383 	    }
6384 	  else
6385 	    {
6386 	      int delay_cost = 0;
6387 
6388 	      if (delay_htab)
6389 		{
6390 		  struct delay_pair *delay_entry;
6391 		  delay_entry
6392 		    = delay_htab->find_with_hash (insn,
6393 						  htab_hash_pointer (insn));
6394 		  while (delay_entry && delay_cost == 0)
6395 		    {
6396 		      delay_cost = estimate_shadow_tick (delay_entry);
6397 		      if (delay_cost > max_insn_queue_index)
6398 			delay_cost = max_insn_queue_index;
6399 		      delay_entry = delay_entry->next_same_i1;
6400 		    }
6401 		}
6402 
6403 	      memcpy (temp_state, curr_state, dfa_state_size);
6404 	      cost = state_transition (temp_state, insn);
6405 	      if (cost < 0)
6406 		cost = 0;
6407 	      else if (cost == 0)
6408 		cost = 1;
6409 	      if (cost < delay_cost)
6410 		{
6411 		  cost = delay_cost;
6412 		  reason = "shadow tick";
6413 		}
6414 	    }
6415 	  if (cost >= 1)
6416 	    {
6417 	      if (SCHED_GROUP_P (insn) && cost > min_cost_group)
6418 		min_cost_group = cost;
6419 	      ready_remove (&ready, i);
6420 	      /* Normally we'd want to queue INSN for COST cycles.  However,
6421 		 if SCHED_GROUP_P is set, then we must ensure that nothing
6422 		 else comes between INSN and its predecessor.  If there is
6423 		 some other insn ready to fire on the next cycle, then that
6424 		 invariant would be broken.
6425 
6426 		 So when SCHED_GROUP_P is set, just queue this insn for a
6427 		 single cycle.  */
6428 	      queue_insn (insn, SCHED_GROUP_P (insn) ? 1 : cost, reason);
6429 	      if (i + 1 < n)
6430 		break;
6431 	    }
6432 	}
6433       if (i == n)
6434 	pass++;
6435     }
6436 }
6437 
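/* Editor's note: an illustrative summary of the cost logic above, under
   the usual DFA convention that state_transition is negative when the
   insn can issue in the current state and non-negative otherwise:

     cost < 0   ->  cost = 0: keep the insn in the ready list;
     cost == 0  ->  cost = 1: queue it for the next cycle;
     cost > 0   ->  queue it for COST cycles (just one cycle when
                    SCHED_GROUP_P, to keep the group contiguous).  */
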
6438 /* Called when we detect that the schedule is impossible.  We examine the
6439    backtrack queue to find the earliest insn that caused this condition.  */
6440 
6441 static struct haifa_saved_data *
6442 verify_shadows (void)
6443 {
6444   struct haifa_saved_data *save, *earliest_fail = NULL;
6445   for (save = backtrack_queue; save; save = save->next)
6446     {
6447       int t;
6448       struct delay_pair *pair = save->delay_pair;
6449       rtx_insn *i1 = pair->i1;
6450 
6451       for (; pair; pair = pair->next_same_i1)
6452 	{
6453 	  rtx_insn *i2 = pair->i2;
6454 
6455 	  if (QUEUE_INDEX (i2) == QUEUE_SCHEDULED)
6456 	    continue;
6457 
6458 	  t = INSN_TICK (i1) + pair_delay (pair);
6459 	  if (t < clock_var)
6460 	    {
6461 	      if (sched_verbose >= 2)
6462 		fprintf (sched_dump,
6463 			 ";;\t\tfailed delay requirements for %d/%d (%d->%d)"
6464 			 ", not ready\n",
6465 			 INSN_UID (pair->i1), INSN_UID (pair->i2),
6466 			 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
6467 	      earliest_fail = save;
6468 	      break;
6469 	    }
6470 	  if (QUEUE_INDEX (i2) >= 0)
6471 	    {
6472 	      int queued_for = INSN_TICK (i2);
6473 
6474 	      if (t < queued_for)
6475 		{
6476 		  if (sched_verbose >= 2)
6477 		    fprintf (sched_dump,
6478 			     ";;\t\tfailed delay requirements for %d/%d"
6479 			     " (%d->%d), queued too late\n",
6480 			     INSN_UID (pair->i1), INSN_UID (pair->i2),
6481 			     INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
6482 		  earliest_fail = save;
6483 		  break;
6484 		}
6485 	    }
6486 	}
6487     }
6488 
6489   return earliest_fail;
6490 }
6491 
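/* Editor's note: a worked example of the check above, with invented
   numbers.  Suppose a delay pair (i1, i2) requires i2 to issue
   pair_delay == 3 cycles after i1, i1 was scheduled at INSN_TICK == 10,
   and clock_var has already advanced to 14.  Then t == 13 < clock_var:
   i2 can no longer be placed on time, so this save entry is reported
   and the scheduler backtracks to it.  */
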
6492 /* Print instructions together with useful scheduling information between
6493    HEAD and TAIL (inclusive).  */
6494 static void
6495 dump_insn_stream (rtx_insn *head, rtx_insn *tail)
6496 {
6497   fprintf (sched_dump, ";;\t| insn | prio |\n");
6498 
6499   rtx_insn *next_tail = NEXT_INSN (tail);
6500   for (rtx_insn *insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6501     {
6502       int priority = NOTE_P (insn) ? 0 : INSN_PRIORITY (insn);
6503       const char *pattern = (NOTE_P (insn)
6504 			     ? "note"
6505 			     : str_pattern_slim (PATTERN (insn)));
6506 
6507       fprintf (sched_dump, ";;\t| %4d | %4d | %-30s ",
6508 	       INSN_UID (insn), priority, pattern);
6509 
6510       if (sched_verbose >= 4)
6511 	{
6512 	  if (NOTE_P (insn) || recog_memoized (insn) < 0)
6513 	    fprintf (sched_dump, "nothing");
6514 	  else
6515 	    print_reservation (sched_dump, insn);
6516 	}
6517       fprintf (sched_dump, "\n");
6518     }
6519 }
6520 
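/* Editor's note: illustrative output of the dump above; the UIDs,
   priorities and patterns are invented, and the reservation column is
   only printed when sched_verbose >= 4:

   ;;	| insn | prio |
   ;;	|   12 |    3 | r101=r100+0x4                  alu
   ;;	|   15 |    1 | [r101]=r102                    mem  */
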
6521 /* Use forward list scheduling to rearrange insns of block pointed to by
6522    TARGET_BB, possibly bringing insns from subsequent blocks in the same
6523    region.  */
6524 
6525 bool
6526 schedule_block (basic_block *target_bb, state_t init_state)
6527 {
6528   int i;
6529   bool success = modulo_ii == 0;
6530   struct sched_block_state ls;
6531   state_t temp_state = NULL;  /* It is used for multipass scheduling.  */
6532   int sort_p, advance, start_clock_var;
6533 
6534   /* Head/tail info for this block.  */
6535   rtx_insn *prev_head = current_sched_info->prev_head;
6536   rtx_insn *next_tail = current_sched_info->next_tail;
6537   rtx_insn *head = NEXT_INSN (prev_head);
6538   rtx_insn *tail = PREV_INSN (next_tail);
6539 
6540   if ((current_sched_info->flags & DONT_BREAK_DEPENDENCIES) == 0
6541       && sched_pressure != SCHED_PRESSURE_MODEL && !sched_fusion)
6542     find_modifiable_mems (head, tail);
6543 
6544   /* We used to have code to avoid getting parameters moved from hard
6545      argument registers into pseudos.
6546 
6547      However, it was removed when it proved to be of marginal benefit
6548      and caused problems because schedule_block and compute_forward_dependences
6549      had different notions of what the "head" insn was.  */
6550 
6551   gcc_assert (head != tail || INSN_P (head));
6552 
6553   haifa_recovery_bb_recently_added_p = false;
6554 
6555   backtrack_queue = NULL;
6556 
6557   /* Debug info.  */
6558   if (sched_verbose)
6559     {
6560       dump_new_block_header (0, *target_bb, head, tail);
6561 
6562       if (sched_verbose >= 2)
6563 	{
6564 	  dump_insn_stream (head, tail);
6565 	  memset (&rank_for_schedule_stats, 0,
6566 		  sizeof (rank_for_schedule_stats));
6567 	}
6568     }
6569 
6570   if (init_state == NULL)
6571     state_reset (curr_state);
6572   else
6573     memcpy (curr_state, init_state, dfa_state_size);
6574 
6575   /* Clear the ready list.  */
6576   ready.first = ready.veclen - 1;
6577   ready.n_ready = 0;
6578   ready.n_debug = 0;
6579 
6580   /* It is used for first cycle multipass scheduling.  */
6581   temp_state = alloca (dfa_state_size);
6582 
6583   if (targetm.sched.init)
6584     targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
6585 
6586   /* We start inserting insns after PREV_HEAD.  */
6587   last_scheduled_insn = prev_head;
6588   last_nondebug_scheduled_insn = NULL;
6589   nonscheduled_insns_begin = NULL;
6590 
6591   gcc_assert ((NOTE_P (last_scheduled_insn)
6592 	       || DEBUG_INSN_P (last_scheduled_insn))
6593 	      && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
6594 
6595   /* Initialize INSN_QUEUE.  Q_SIZE is the total number of insns in the
6596      queue.  */
6597   q_ptr = 0;
6598   q_size = 0;
6599 
6600   insn_queue = XALLOCAVEC (rtx_insn_list *, max_insn_queue_index + 1);
6601   memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
6602 
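  /* Editor's note: INSN_QUEUE behaves as a circular buffer indexed
     relative to Q_PTR; a hedged sketch of the intended use, assuming the
     NEXT_Q_AFTER macro used elsewhere in this file:

       queue_insn (insn, 2, "stall");  // slot NEXT_Q_AFTER (q_ptr, 2)
       // Two advance_one_cycle/queue_to_ready rounds later, INSN
       // becomes ready.  */
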
6603   /* Start just before the beginning of time.  */
6604   clock_var = -1;
6605 
6606   /* We need the queue and ready lists and clock_var to be initialized
6607      in try_ready () (which is called through init_ready_list ()).  */
6608   (*current_sched_info->init_ready_list) ();
6609 
6610   if (sched_pressure)
6611     sched_pressure_start_bb (*target_bb);
6612 
6613   /* The algorithm is O(n^2) in the number of ready insns at any given
6614      time in the worst case.  Before reload we are more likely to have
6615      big lists so truncate them to a reasonable size.  */
6616   if (!reload_completed
6617       && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
6618     {
6619       ready_sort_debug (&ready);
6620       ready_sort_real (&ready);
6621 
6622       /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
6623          If there are debug insns, we know they're first.  */
6624       for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
6625 	if (!SCHED_GROUP_P (ready_element (&ready, i)))
6626 	  break;
6627 
6628       if (sched_verbose >= 2)
6629 	{
6630 	  fprintf (sched_dump,
6631 		   ";;\t\tReady list on entry: %d insns:  ", ready.n_ready);
6632 	  debug_ready_list (&ready);
6633 	  fprintf (sched_dump,
6634 		   ";;\t\t before reload => truncated to %d insns\n", i);
6635 	}
6636 
6637       /* Delay all insns past it for 1 cycle.  If debug counter is
6638 	 activated make an exception for the insn right after
6639 	 nonscheduled_insns_begin.  */
6640       {
6641 	rtx_insn *skip_insn;
6642 
6643 	if (dbg_cnt (sched_insn) == false)
6644 	  skip_insn = first_nonscheduled_insn ();
6645 	else
6646 	  skip_insn = NULL;
6647 
6648 	while (i < ready.n_ready)
6649 	  {
6650 	    rtx_insn *insn;
6651 
6652 	    insn = ready_remove (&ready, i);
6653 
6654 	    if (insn != skip_insn)
6655 	      queue_insn (insn, 1, "list truncated");
6656 	  }
6657 	if (skip_insn)
6658 	  ready_add (&ready, skip_insn, true);
6659       }
6660     }
6661 
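  /* Editor's note: a numeric illustration of the truncation above, with
     invented values.  If MAX_SCHED_READY_INSNS were 100 and the block
     entered with 2 debug insns plus 150 real ready insns, everything from
     the first free-standing (non-SCHED_GROUP_P) insn at index >= 102
     onwards would be requeued for one cycle, except the single insn
     possibly exempted by the sched_insn debug counter.  */
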
6662   /* Now we can restore basic block notes and maintain precise cfg.  */
6663   restore_bb_notes (*target_bb);
6664 
6665   last_clock_var = -1;
6666 
6667   advance = 0;
6668 
6669   gcc_assert (scheduled_insns.length () == 0);
6670   sort_p = TRUE;
6671   must_backtrack = false;
6672   modulo_insns_scheduled = 0;
6673 
6674   ls.modulo_epilogue = false;
6675   ls.first_cycle_insn_p = true;
6676 
6677   /* Loop until all the insns in BB are scheduled.  */
6678   while ((*current_sched_info->schedule_more_p) ())
6679     {
6680       perform_replacements_new_cycle ();
6681       do
6682 	{
6683 	  start_clock_var = clock_var;
6684 
6685 	  clock_var++;
6686 
6687 	  advance_one_cycle ();
6688 
6689 	  /* Add to the ready list all pending insns that can be issued now.
6690 	     If there are no ready insns, increment clock until one
6691 	     is ready and add all pending insns at that point to the ready
6692 	     list.  */
6693 	  queue_to_ready (&ready);
6694 
6695 	  gcc_assert (ready.n_ready);
6696 
6697 	  if (sched_verbose >= 2)
6698 	    {
6699 	      fprintf (sched_dump, ";;\t\tReady list after queue_to_ready:");
6700 	      debug_ready_list (&ready);
6701 	    }
6702 	  advance -= clock_var - start_clock_var;
6703 	}
6704       while (advance > 0);
6705 
6706       if (ls.modulo_epilogue)
6707 	{
6708 	  int stage = clock_var / modulo_ii;
6709 	  if (stage > modulo_last_stage * 2 + 2)
6710 	    {
6711 	      if (sched_verbose >= 2)
6712 		fprintf (sched_dump,
6713 			 ";;\t\tmodulo schedule succeeded at II %d\n",
6714 			 modulo_ii);
6715 	      success = true;
6716 	      goto end_schedule;
6717 	    }
6718 	}
6719       else if (modulo_ii > 0)
6720 	{
6721 	  int stage = clock_var / modulo_ii;
6722 	  if (stage > modulo_max_stages)
6723 	    {
6724 	      if (sched_verbose >= 2)
6725 		fprintf (sched_dump,
6726 			 ";;\t\tfailing schedule due to excessive stages\n");
6727 	      goto end_schedule;
6728 	    }
6729 	  if (modulo_n_insns == modulo_insns_scheduled
6730 	      && stage > modulo_last_stage)
6731 	    {
6732 	      if (sched_verbose >= 2)
6733 		fprintf (sched_dump,
6734 			 ";;\t\tfound kernel after %d stages, II %d\n",
6735 			 stage, modulo_ii);
6736 	      ls.modulo_epilogue = true;
6737 	    }
6738 	}
6739 
6740       prune_ready_list (temp_state, true, false, ls.modulo_epilogue);
6741       if (ready.n_ready == 0)
6742 	continue;
6743       if (must_backtrack)
6744 	goto do_backtrack;
6745 
6746       ls.shadows_only_p = false;
6747       cycle_issued_insns = 0;
6748       ls.can_issue_more = issue_rate;
6749       for (;;)
6750 	{
6751 	  rtx_insn *insn;
6752 	  int cost;
6753 	  bool asm_p;
6754 
6755 	  if (sort_p && ready.n_ready > 0)
6756 	    {
6757 	      /* Sort the ready list based on priority.  This must be
6758 		 done every iteration through the loop, as schedule_insn
6759 		 may have readied additional insns that will not be
6760 		 sorted correctly.  */
6761 	      ready_sort (&ready);
6762 
6763 	      if (sched_verbose >= 2)
6764 		{
6765 		  fprintf (sched_dump,
6766 			   ";;\t\tReady list after ready_sort:    ");
6767 		  debug_ready_list (&ready);
6768 		}
6769 	    }
6770 
6771 	  /* We don't want md sched reorder to even see debug insns, so put
6772 	     them out right away.  */
6773 	  if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
6774 	      && (*current_sched_info->schedule_more_p) ())
6775 	    {
6776 	      while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
6777 		{
6778 		  rtx_insn *insn = ready_remove_first (&ready);
6779 		  gcc_assert (DEBUG_INSN_P (insn));
6780 		  (*current_sched_info->begin_schedule_ready) (insn);
6781 		  scheduled_insns.safe_push (insn);
6782 		  last_scheduled_insn = insn;
6783 		  advance = schedule_insn (insn);
6784 		  gcc_assert (advance == 0);
6785 		  if (ready.n_ready > 0)
6786 		    ready_sort (&ready);
6787 		}
6788 	    }
6789 
6790 	  if (ls.first_cycle_insn_p && !ready.n_ready)
6791 	    break;
6792 
6793 	resume_after_backtrack:
6794 	  /* Allow the target to reorder the list, typically for
6795 	     better instruction bundling.  */
6796 	  if (sort_p
6797 	      && (ready.n_ready == 0
6798 		  || !SCHED_GROUP_P (ready_element (&ready, 0))))
6799 	    {
6800 	      if (ls.first_cycle_insn_p && targetm.sched.reorder)
6801 		ls.can_issue_more
6802 		  = targetm.sched.reorder (sched_dump, sched_verbose,
6803 					   ready_lastpos (&ready),
6804 					   &ready.n_ready, clock_var);
6805 	      else if (!ls.first_cycle_insn_p && targetm.sched.reorder2)
6806 		ls.can_issue_more
6807 		  = targetm.sched.reorder2 (sched_dump, sched_verbose,
6808 					    ready.n_ready
6809 					    ? ready_lastpos (&ready) : NULL,
6810 					    &ready.n_ready, clock_var);
6811 	    }
6812 
6813 	restart_choose_ready:
6814 	  if (sched_verbose >= 2)
6815 	    {
6816 	      fprintf (sched_dump, ";;\tReady list (t = %3d):  ",
6817 		       clock_var);
6818 	      debug_ready_list (&ready);
6819 	      if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
6820 		print_curr_reg_pressure ();
6821 	    }
6822 
6823 	  if (ready.n_ready == 0
6824 	      && ls.can_issue_more
6825 	      && reload_completed)
6826 	    {
6827 	      /* Allow scheduling insns directly from the queue in case
6828 		 there's nothing better to do (ready list is empty) but
6829 		 there are still vacant dispatch slots in the current cycle.  */
6830 	      if (sched_verbose >= 6)
6831 		fprintf (sched_dump, ";;\t\tSecond chance\n");
6832 	      memcpy (temp_state, curr_state, dfa_state_size);
6833 	      if (early_queue_to_ready (temp_state, &ready))
6834 		ready_sort (&ready);
6835 	    }
6836 
6837 	  if (ready.n_ready == 0
6838 	      || !ls.can_issue_more
6839 	      || state_dead_lock_p (curr_state)
6840 	      || !(*current_sched_info->schedule_more_p) ())
6841 	    break;
6842 
6843 	  /* Select and remove the insn from the ready list.  */
6844 	  if (sort_p)
6845 	    {
6846 	      int res;
6847 
6848 	      insn = NULL;
6849 	      res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);
6850 
6851 	      if (res < 0)
6852 		/* Finish cycle.  */
6853 		break;
6854 	      if (res > 0)
6855 		goto restart_choose_ready;
6856 
6857 	      gcc_assert (insn != NULL_RTX);
6858 	    }
6859 	  else
6860 	    insn = ready_remove_first (&ready);
6861 
6862 	  if (sched_pressure != SCHED_PRESSURE_NONE
6863 	      && INSN_TICK (insn) > clock_var)
6864 	    {
6865 	      ready_add (&ready, insn, true);
6866 	      advance = 1;
6867 	      break;
6868 	    }
6869 
6870 	  if (targetm.sched.dfa_new_cycle
6871 	      && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
6872 					      insn, last_clock_var,
6873 					      clock_var, &sort_p))
6874 	    /* SORT_P is used by the target to override sorting
6875 	       of the ready list.  This is needed when the target
6876 	       has modified its internal structures expecting that
6877 	       the insn will be issued next.  As we need the insn
6878 	       to have the highest priority (so it will be returned by
6879 	       the ready_remove_first call above), we invoke
6880 	       ready_add (&ready, insn, true).
6881 	       But, still, there is one issue: INSN can later be
6882 	       discarded by the scheduler's front end through
6883 	       current_sched_info->can_schedule_ready_p, and hence
6884 	       won't be issued next.  */
6885 	    {
6886 	      ready_add (&ready, insn, true);
6887               break;
6888 	    }
6889 
6890 	  sort_p = TRUE;
6891 
6892 	  if (current_sched_info->can_schedule_ready_p
6893 	      && ! (*current_sched_info->can_schedule_ready_p) (insn))
6894 	    /* We normally get here only if we don't want to move
6895 	       insn from the split block.  */
6896 	    {
6897 	      TODO_SPEC (insn) = DEP_POSTPONED;
6898 	      goto restart_choose_ready;
6899 	    }
6900 
6901 	  if (delay_htab)
6902 	    {
6903 	      /* If this insn is the first part of a delay-slot pair, record a
6904 		 backtrack point.  */
6905 	      struct delay_pair *delay_entry;
6906 	      delay_entry
6907 		= delay_htab->find_with_hash (insn, htab_hash_pointer (insn));
6908 	      if (delay_entry)
6909 		{
6910 		  save_backtrack_point (delay_entry, ls);
6911 		  if (sched_verbose >= 2)
6912 		    fprintf (sched_dump, ";;\t\tsaving backtrack point\n");
6913 		}
6914 	    }
6915 
6916 	  /* DECISION is made.  */
6917 
6918 	  if (modulo_ii > 0 && INSN_UID (insn) < modulo_iter0_max_uid)
6919 	    {
6920 	      modulo_insns_scheduled++;
6921 	      modulo_last_stage = clock_var / modulo_ii;
6922 	    }
6923           if (TODO_SPEC (insn) & SPECULATIVE)
6924             generate_recovery_code (insn);
6925 
6926 	  if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
6927 	    targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
6928 
6929 	  /* Update counters, etc in the scheduler's front end.  */
6930 	  (*current_sched_info->begin_schedule_ready) (insn);
6931 	  scheduled_insns.safe_push (insn);
6932 	  gcc_assert (NONDEBUG_INSN_P (insn));
6933 	  last_nondebug_scheduled_insn = last_scheduled_insn = insn;
6934 
6935 	  if (recog_memoized (insn) >= 0)
6936 	    {
6937 	      memcpy (temp_state, curr_state, dfa_state_size);
6938 	      cost = state_transition (curr_state, insn);
6939 	      if (sched_pressure != SCHED_PRESSURE_WEIGHTED && !sched_fusion)
6940 		gcc_assert (cost < 0);
6941 	      if (memcmp (temp_state, curr_state, dfa_state_size) != 0)
6942 		cycle_issued_insns++;
6943 	      asm_p = false;
6944 	    }
6945 	  else
6946 	    asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
6947 		     || asm_noperands (PATTERN (insn)) >= 0);
6948 
6949 	  if (targetm.sched.variable_issue)
6950 	    ls.can_issue_more =
6951 	      targetm.sched.variable_issue (sched_dump, sched_verbose,
6952 					    insn, ls.can_issue_more);
6953 	  /* A naked CLOBBER or USE generates no instruction, so do
6954 	     not count them against the issue rate.  */
6955 	  else if (GET_CODE (PATTERN (insn)) != USE
6956 		   && GET_CODE (PATTERN (insn)) != CLOBBER)
6957 	    ls.can_issue_more--;
6958 	  advance = schedule_insn (insn);
6959 
6960 	  if (SHADOW_P (insn))
6961 	    ls.shadows_only_p = true;
6962 
6963 	  /* After issuing an asm insn we should start a new cycle.  */
6964 	  if (advance == 0 && asm_p)
6965 	    advance = 1;
6966 
6967 	  if (must_backtrack)
6968 	    break;
6969 
6970 	  if (advance != 0)
6971 	    break;
6972 
6973 	  ls.first_cycle_insn_p = false;
6974 	  if (ready.n_ready > 0)
6975 	    prune_ready_list (temp_state, false, ls.shadows_only_p,
6976 			      ls.modulo_epilogue);
6977 	}
6978 
6979     do_backtrack:
6980       if (!must_backtrack)
6981 	for (i = 0; i < ready.n_ready; i++)
6982 	  {
6983 	    rtx_insn *insn = ready_element (&ready, i);
6984 	    if (INSN_EXACT_TICK (insn) == clock_var)
6985 	      {
6986 		must_backtrack = true;
6987 		clock_var++;
6988 		break;
6989 	      }
6990 	  }
6991       if (must_backtrack && modulo_ii > 0)
6992 	{
6993 	  if (modulo_backtracks_left == 0)
6994 	    goto end_schedule;
6995 	  modulo_backtracks_left--;
6996 	}
6997       while (must_backtrack)
6998 	{
6999 	  struct haifa_saved_data *failed;
7000 	  rtx_insn *failed_insn;
7001 
7002 	  must_backtrack = false;
7003 	  failed = verify_shadows ();
7004 	  gcc_assert (failed);
7005 
7006 	  failed_insn = failed->delay_pair->i1;
7007 	  /* Clear these queues.  */
7008 	  perform_replacements_new_cycle ();
7009 	  toggle_cancelled_flags (false);
7010 	  unschedule_insns_until (failed_insn);
7011 	  while (failed != backtrack_queue)
7012 	    free_topmost_backtrack_point (true);
7013 	  restore_last_backtrack_point (&ls);
7014 	  if (sched_verbose >= 2)
7015 	    fprintf (sched_dump, ";;\t\trewind to cycle %d\n", clock_var);
7016 	  /* Delay by at least a cycle.  This could cause additional
7017 	     backtracking.  */
7018 	  queue_insn (failed_insn, 1, "backtracked");
7019 	  advance = 0;
7020 	  if (must_backtrack)
7021 	    continue;
7022 	  if (ready.n_ready > 0)
7023 	    goto resume_after_backtrack;
7024 	  else
7025 	    {
7026 	      if (clock_var == 0 && ls.first_cycle_insn_p)
7027 		goto end_schedule;
7028 	      advance = 1;
7029 	      break;
7030 	    }
7031 	}
7032       ls.first_cycle_insn_p = true;
7033     }
7034   if (ls.modulo_epilogue)
7035     success = true;
7036  end_schedule:
7037   if (!ls.first_cycle_insn_p || advance)
7038     advance_one_cycle ();
7039   perform_replacements_new_cycle ();
7040   if (modulo_ii > 0)
7041     {
7042       /* Once again, debug insns are troublesome: they can be on the ready
7043 	 list even if they have unresolved dependencies.  To make our view
7044 	 of the world consistent, remove such "ready" insns.  */
7045     restart_debug_insn_loop:
7046       for (i = ready.n_ready - 1; i >= 0; i--)
7047 	{
7048 	  rtx_insn *x;
7049 
7050 	  x = ready_element (&ready, i);
7051 	  if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (x)) != NULL
7052 	      || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (x)) != NULL)
7053 	    {
7054 	      ready_remove (&ready, i);
7055 	      goto restart_debug_insn_loop;
7056 	    }
7057 	}
7058       for (i = ready.n_ready - 1; i >= 0; i--)
7059 	{
7060 	  rtx_insn *x;
7061 
7062 	  x = ready_element (&ready, i);
7063 	  resolve_dependencies (x);
7064 	}
7065       for (i = 0; i <= max_insn_queue_index; i++)
7066 	{
7067 	  rtx_insn_list *link;
7068 	  while ((link = insn_queue[i]) != NULL)
7069 	    {
7070 	      rtx_insn *x = link->insn ();
7071 	      insn_queue[i] = link->next ();
7072 	      QUEUE_INDEX (x) = QUEUE_NOWHERE;
7073 	      free_INSN_LIST_node (link);
7074 	      resolve_dependencies (x);
7075 	    }
7076 	}
7077     }
7078 
7079   if (!success)
7080     undo_all_replacements ();
7081 
7082   /* Debug info.  */
7083   if (sched_verbose)
7084     {
7085       fprintf (sched_dump, ";;\tReady list (final):  ");
7086       debug_ready_list (&ready);
7087     }
7088 
7089   if (modulo_ii == 0 && current_sched_info->queue_must_finish_empty)
7090     /* Sanity check -- queue must be empty now.  Meaningless if region has
7091        multiple bbs.  */
7092     gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
7093   else if (modulo_ii == 0)
7094     {
7095       /* We must maintain QUEUE_INDEX between blocks in region.  */
7096       for (i = ready.n_ready - 1; i >= 0; i--)
7097 	{
7098 	  rtx_insn *x;
7099 
7100 	  x = ready_element (&ready, i);
7101 	  QUEUE_INDEX (x) = QUEUE_NOWHERE;
7102 	  TODO_SPEC (x) = HARD_DEP;
7103 	}
7104 
7105       if (q_size)
7106 	for (i = 0; i <= max_insn_queue_index; i++)
7107 	  {
7108 	    rtx_insn_list *link;
7109 	    for (link = insn_queue[i]; link; link = link->next ())
7110 	      {
7111 		rtx_insn *x;
7112 
7113 		x = link->insn ();
7114 		QUEUE_INDEX (x) = QUEUE_NOWHERE;
7115 		TODO_SPEC (x) = HARD_DEP;
7116 	      }
7117 	    free_INSN_LIST_list (&insn_queue[i]);
7118 	  }
7119     }
7120 
7121   if (sched_pressure == SCHED_PRESSURE_MODEL)
7122     model_end_schedule ();
7123 
7124   if (success)
7125     {
7126       commit_schedule (prev_head, tail, target_bb);
7127       if (sched_verbose)
7128 	fprintf (sched_dump, ";;   total time = %d\n", clock_var);
7129     }
7130   else
7131     last_scheduled_insn = tail;
7132 
7133   scheduled_insns.truncate (0);
7134 
7135   if (!current_sched_info->queue_must_finish_empty
7136       || haifa_recovery_bb_recently_added_p)
7137     {
7138       /* INSN_TICK (the minimum clock tick at which an insn becomes
7139          ready) may not be correct for insns in the subsequent
7140          blocks of the region.  We should use a correct value of
7141          `clock_var' or modify INSN_TICK.  It is better to keep
7142          clock_var equal to 0 at the start of a basic block.
7143          Therefore we modify INSN_TICK here.  */
7144       fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
7145     }
7146 
7147   if (targetm.sched.finish)
7148     {
7149       targetm.sched.finish (sched_dump, sched_verbose);
7150       /* Target might have added some instructions to the scheduled block
7151 	 in its md_finish () hook.  These new insns don't have any data
7152 	 initialized, and to identify them we extend h_i_d so that they'll
7153 	 get zero luids.  */
7154       sched_extend_luids ();
7155     }
7156 
7157   /* Update head/tail boundaries.  */
7158   head = NEXT_INSN (prev_head);
7159   tail = last_scheduled_insn;
7160 
7161   if (sched_verbose)
7162     {
7163       fprintf (sched_dump, ";;   new head = %d\n;;   new tail = %d\n",
7164 	       INSN_UID (head), INSN_UID (tail));
7165 
7166       if (sched_verbose >= 2)
7167 	{
7168 	  dump_insn_stream (head, tail);
7169 	  print_rank_for_schedule_stats (";; TOTAL ", &rank_for_schedule_stats,
7170 					 NULL);
7171 	}
7172 
7173       fprintf (sched_dump, "\n");
7174     }
7175 
7176   head = restore_other_notes (head, NULL);
7177 
7178   current_sched_info->head = head;
7179   current_sched_info->tail = tail;
7180 
7181   free_backtrack_queue ();
7182 
7183   return success;
7184 }
7185 
7186 /* Compute the priority of each insn between HEAD and TAIL; return the insn count.  */
7187 
7188 int
7189 set_priorities (rtx_insn *head, rtx_insn *tail)
7190 {
7191   rtx_insn *insn;
7192   int n_insn;
7193   int sched_max_insns_priority =
7194 	current_sched_info->sched_max_insns_priority;
7195   rtx_insn *prev_head;
7196 
7197   if (head == tail && ! INSN_P (head))
7198     gcc_unreachable ();
7199 
7200   n_insn = 0;
7201 
7202   prev_head = PREV_INSN (head);
7203   for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
7204     {
7205       if (!INSN_P (insn))
7206 	continue;
7207 
7208       n_insn++;
7209       (void) priority (insn);
7210 
7211       gcc_assert (INSN_PRIORITY_KNOWN (insn));
7212 
7213       sched_max_insns_priority = MAX (sched_max_insns_priority,
7214 				      INSN_PRIORITY (insn));
7215     }
7216 
7217   current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
7218 
7219   return n_insn;
7220 }
7221 
7222 /* Set sched_dump and sched_verbose for the desired debugging output. */
7223 void
7224 setup_sched_dump (void)
7225 {
7226   sched_verbose = sched_verbose_param;
7227   sched_dump = dump_file;
7228   if (!dump_file)
7229     sched_verbose = 0;
7230 }
7231 
7232 /* Allocate data for register pressure sensitive scheduling.  */
7233 static void
7234 alloc_global_sched_pressure_data (void)
7235 {
7236   if (sched_pressure != SCHED_PRESSURE_NONE)
7237     {
7238       int i, max_regno = max_reg_num ();
7239 
7240       if (sched_dump != NULL)
7241 	/* We need info about pseudos for rtl dumps about pseudo
7242 	   classes and costs.  */
7243 	regstat_init_n_sets_and_refs ();
7244       ira_set_pseudo_classes (true, sched_verbose ? sched_dump : NULL);
7245       sched_regno_pressure_class
7246 	= (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
7247       for (i = 0; i < max_regno; i++)
7248 	sched_regno_pressure_class[i]
7249 	  = (i < FIRST_PSEUDO_REGISTER
7250 	     ? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
7251 	     : ira_pressure_class_translate[reg_allocno_class (i)]);
7252       curr_reg_live = BITMAP_ALLOC (NULL);
7253       if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
7254 	{
7255 	  saved_reg_live = BITMAP_ALLOC (NULL);
7256 	  region_ref_regs = BITMAP_ALLOC (NULL);
7257 	}
7258 
7259       /* Calculate number of CALL_USED_REGS in register classes that
7260 	 we calculate register pressure for.  */
7261       for (int c = 0; c < ira_pressure_classes_num; ++c)
7262 	{
7263 	  enum reg_class cl = ira_pressure_classes[c];
7264 
7265 	  call_used_regs_num[cl] = 0;
7266 
7267 	  for (int i = 0; i < ira_class_hard_regs_num[cl]; ++i)
7268 	    if (call_used_regs[ira_class_hard_regs[cl][i]])
7269 	      ++call_used_regs_num[cl];
7270 	}
7271     }
7272 }
7273 
7274 /*  Free data for register pressure sensitive scheduling.  Also called
7275     from schedule_region when stopping sched-pressure early.  */
7276 void
7277 free_global_sched_pressure_data (void)
7278 {
7279   if (sched_pressure != SCHED_PRESSURE_NONE)
7280     {
7281       if (regstat_n_sets_and_refs != NULL)
7282 	regstat_free_n_sets_and_refs ();
7283       if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
7284 	{
7285 	  BITMAP_FREE (region_ref_regs);
7286 	  BITMAP_FREE (saved_reg_live);
7287 	}
7288       BITMAP_FREE (curr_reg_live);
7289       free (sched_regno_pressure_class);
7290     }
7291 }
7292 
7293 /* Initialize some global state for the scheduler.  This function works
7294    with the common data shared between all the schedulers.  It is called
7295    from the scheduler specific initialization routine.  */
7296 
7297 void
7298 sched_init (void)
7299 {
7300   /* Disable speculative loads when the target uses cc0.  */
7301   if (HAVE_cc0)
7302     flag_schedule_speculative_load = 0;
7303 
7304   if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
7305     targetm.sched.dispatch_do (NULL, DISPATCH_INIT);
7306 
7307   if (live_range_shrinkage_p)
7308     sched_pressure = SCHED_PRESSURE_WEIGHTED;
7309   else if (flag_sched_pressure
7310 	   && !reload_completed
7311 	   && common_sched_info->sched_pass_id == SCHED_RGN_PASS)
7312     sched_pressure = ((enum sched_pressure_algorithm)
7313 		      PARAM_VALUE (PARAM_SCHED_PRESSURE_ALGORITHM));
7314   else
7315     sched_pressure = SCHED_PRESSURE_NONE;
7316 
7317   if (sched_pressure != SCHED_PRESSURE_NONE)
7318     ira_setup_eliminable_regset ();
7319 
7320   /* Initialize SPEC_INFO.  */
7321   if (targetm.sched.set_sched_flags)
7322     {
7323       spec_info = &spec_info_var;
7324       targetm.sched.set_sched_flags (spec_info);
7325 
7326       if (spec_info->mask != 0)
7327         {
7328           spec_info->data_weakness_cutoff =
7329             (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
7330           spec_info->control_weakness_cutoff =
7331             (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
7332              * REG_BR_PROB_BASE) / 100;
7333         }
7334       else
7335 	/* So we won't read anything accidentally.  */
7336 	spec_info = NULL;
7337 
7338     }
7339   else
7340     /* So we won't read anything accidentally.  */
7341     spec_info = 0;
7342 
7343   /* Initialize issue_rate.  */
7344   if (targetm.sched.issue_rate)
7345     issue_rate = targetm.sched.issue_rate ();
7346   else
7347     issue_rate = 1;
7348 
7349   if (targetm.sched.first_cycle_multipass_dfa_lookahead
7350       /* Don't use max_issue with reg_pressure scheduling.  Multipass
7351 	 scheduling and reg_pressure scheduling undo each other's decisions.  */
7352       && sched_pressure == SCHED_PRESSURE_NONE)
7353     dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
7354   else
7355     dfa_lookahead = 0;
7356 
7357   /* Set to "0" so that we recalculate.  */
7358   max_lookahead_tries = 0;
7359 
7360   if (targetm.sched.init_dfa_pre_cycle_insn)
7361     targetm.sched.init_dfa_pre_cycle_insn ();
7362 
7363   if (targetm.sched.init_dfa_post_cycle_insn)
7364     targetm.sched.init_dfa_post_cycle_insn ();
7365 
7366   dfa_start ();
7367   dfa_state_size = state_size ();
7368 
7369   init_alias_analysis ();
7370 
7371   if (!sched_no_dce)
7372     df_set_flags (DF_LR_RUN_DCE);
7373   df_note_add_problem ();
7374 
7375   /* More problems needed for interloop dep calculation in SMS.  */
7376   if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
7377     {
7378       df_rd_add_problem ();
7379       df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
7380     }
7381 
7382   df_analyze ();
7383 
7384   /* Do not run DCE after reload, as this can kill nops inserted
7385      by bundling.  */
7386   if (reload_completed)
7387     df_clear_flags (DF_LR_RUN_DCE);
7388 
7389   regstat_compute_calls_crossed ();
7390 
7391   if (targetm.sched.init_global)
7392     targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
7393 
7394   alloc_global_sched_pressure_data ();
7395 
7396   curr_state = xmalloc (dfa_state_size);
7397 }
7398 
7399 static void haifa_init_only_bb (basic_block, basic_block);
7400 
7401 /* Initialize data structures specific to the Haifa scheduler.  */
7402 void
7403 haifa_sched_init (void)
7404 {
7405   setup_sched_dump ();
7406   sched_init ();
7407 
7408   scheduled_insns.create (0);
7409 
7410   if (spec_info != NULL)
7411     {
7412       sched_deps_info->use_deps_list = 1;
7413       sched_deps_info->generate_spec_deps = 1;
7414     }
7415 
7416   /* Initialize luids, dependency caches, target and h_i_d for the
7417      whole function.  */
7418   {
7419     bb_vec_t bbs;
7420     bbs.create (n_basic_blocks_for_fn (cfun));
7421     basic_block bb;
7422 
7423     sched_init_bbs ();
7424 
7425     FOR_EACH_BB_FN (bb, cfun)
7426       bbs.quick_push (bb);
7427     sched_init_luids (bbs);
7428     sched_deps_init (true);
7429     sched_extend_target ();
7430     haifa_init_h_i_d (bbs);
7431 
7432     bbs.release ();
7433   }
7434 
7435   sched_init_only_bb = haifa_init_only_bb;
7436   sched_split_block = sched_split_block_1;
7437   sched_create_empty_bb = sched_create_empty_bb_1;
7438   haifa_recovery_bb_ever_added_p = false;
7439 
7440   nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
7441   before_recovery = 0;
7442   after_recovery = 0;
7443 
7444   modulo_ii = 0;
7445 }
7446 
7447 /* Finish work with the data specific to the Haifa scheduler.  */
7448 void
7449 haifa_sched_finish (void)
7450 {
7451   sched_create_empty_bb = NULL;
7452   sched_split_block = NULL;
7453   sched_init_only_bb = NULL;
7454 
7455   if (spec_info && spec_info->dump)
7456     {
7457       char c = reload_completed ? 'a' : 'b';
7458 
7459       fprintf (spec_info->dump,
7460 	       ";; %s:\n", current_function_name ());
7461 
7462       fprintf (spec_info->dump,
7463                ";; Procedure %cr-begin-data-spec motions == %d\n",
7464                c, nr_begin_data);
7465       fprintf (spec_info->dump,
7466                ";; Procedure %cr-be-in-data-spec motions == %d\n",
7467                c, nr_be_in_data);
7468       fprintf (spec_info->dump,
7469                ";; Procedure %cr-begin-control-spec motions == %d\n",
7470                c, nr_begin_control);
7471       fprintf (spec_info->dump,
7472                ";; Procedure %cr-be-in-control-spec motions == %d\n",
7473                c, nr_be_in_control);
7474     }
7475 
7476   scheduled_insns.release ();
7477 
7478   /* Finalize h_i_d, dependency caches, and luids for the whole
7479      function.  Target will be finalized in md_global_finish ().  */
7480   sched_deps_finish ();
7481   sched_finish_luids ();
7482   current_sched_info = NULL;
7483   insn_queue = NULL;
7484   sched_finish ();
7485 }
7486 
7487 /* Free global data used during insn scheduling.  This function works with
7488    the common data shared between the schedulers.  */
7489 
7490 void
7491 sched_finish (void)
7492 {
7493   haifa_finish_h_i_d ();
7494   free_global_sched_pressure_data ();
7495   free (curr_state);
7496 
7497   if (targetm.sched.finish_global)
7498     targetm.sched.finish_global (sched_dump, sched_verbose);
7499 
7500   end_alias_analysis ();
7501 
7502   regstat_free_calls_crossed ();
7503 
7504   dfa_finish ();
7505 }
7506 
7507 /* Free all delay_pair structures that were recorded.  */
7508 void
7509 free_delay_pairs (void)
7510 {
7511   if (delay_htab)
7512     {
7513       delay_htab->empty ();
7514       delay_htab_i2->empty ();
7515     }
7516 }
7517 
7518 /* Fix INSN_TICKs of the instructions in the current block as well as
7519    INSN_TICKs of their dependents.
7520    HEAD and TAIL are the begin and the end of the current scheduled block.  */
7521 static void
7522 fix_inter_tick (rtx_insn *head, rtx_insn *tail)
7523 {
7524   /* Set of instructions with corrected INSN_TICK.  */
7525   bitmap_head processed;
7526   /* ??? It is doubtful whether we should assume that cycle advance happens
7527      on basic block boundaries.  Basically insns that are unconditionally
7528      ready at the start of the block are preferable to those which have
7529      a one cycle dependency on an insn from the previous block.  */
7530   int next_clock = clock_var + 1;
7531 
7532   bitmap_initialize (&processed, 0);
7533 
7534   /* Iterate over the scheduled instructions and fix their INSN_TICKs and
7535      the INSN_TICKs of dependent instructions, so that INSN_TICKs are
7536      consistent across different blocks.  */
7537   for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
7538     {
7539       if (INSN_P (head))
7540 	{
7541 	  int tick;
7542 	  sd_iterator_def sd_it;
7543 	  dep_t dep;
7544 
7545 	  tick = INSN_TICK (head);
7546 	  gcc_assert (tick >= MIN_TICK);
7547 
7548 	  /* Fix INSN_TICK of instruction from just scheduled block.  */
7549 	  if (bitmap_set_bit (&processed, INSN_LUID (head)))
7550 	    {
7551 	      tick -= next_clock;
7552 
7553 	      if (tick < MIN_TICK)
7554 		tick = MIN_TICK;
7555 
7556 	      INSN_TICK (head) = tick;
7557 	    }
7558 
7559 	  if (DEBUG_INSN_P (head))
7560 	    continue;
7561 
7562 	  FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
7563 	    {
7564 	      rtx_insn *next;
7565 
7566 	      next = DEP_CON (dep);
7567 	      tick = INSN_TICK (next);
7568 
7569 	      if (tick != INVALID_TICK
7570 		  /* If NEXT has its INSN_TICK calculated, fix it.
7571 		     If not - it will be properly calculated from
7572 		     scratch later in fix_tick_ready.  */
7573 		  && bitmap_set_bit (&processed, INSN_LUID (next)))
7574 		{
7575 		  tick -= next_clock;
7576 
7577 		  if (tick < MIN_TICK)
7578 		    tick = MIN_TICK;
7579 
7580 		  if (tick > INTER_TICK (next))
7581 		    INTER_TICK (next) = tick;
7582 		  else
7583 		    tick = INTER_TICK (next);
7584 
7585 		  INSN_TICK (next) = tick;
7586 		}
7587 	    }
7588 	}
7589     }
7590   bitmap_clear (&processed);
7591 }
7592 
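/* Editor's note: a worked example of the normalization in fix_inter_tick,
   with invented numbers.  If the block just scheduled ended at clock_var
   == 7, then next_clock == 8; an insn in a later block with INSN_TICK ==
   10 gets tick 10 - 8 == 2, and any result below MIN_TICK is clamped to
   MIN_TICK.  */
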
7593 /* Check if NEXT is ready to be added to the ready or queue list.
7594    If "yes", add it to the proper list.
7595    Returns:
7596       -1 - is not ready yet,
7597        0 - added to the ready list,
7598    0 < N - queued for N cycles.  */
7599 int
7600 try_ready (rtx_insn *next)
7601 {
7602   ds_t old_ts, new_ts;
7603 
7604   old_ts = TODO_SPEC (next);
7605 
7606   gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP | DEP_CONTROL | DEP_POSTPONED))
7607 	      && (old_ts == HARD_DEP
7608 		  || old_ts == DEP_POSTPONED
7609 		  || (old_ts & SPECULATIVE)
7610 		  || old_ts == DEP_CONTROL));
7611 
7612   new_ts = recompute_todo_spec (next, false);
7613 
7614   if (new_ts & (HARD_DEP | DEP_POSTPONED))
7615     gcc_assert (new_ts == old_ts
7616 		&& QUEUE_INDEX (next) == QUEUE_NOWHERE);
7617   else if (current_sched_info->new_ready)
7618     new_ts = current_sched_info->new_ready (next, new_ts);
7619 
7620   /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then the insn might
7621      have its original pattern or a changed (speculative) one.  This is due
7622      to changing ebbs in region scheduling.
7623      * But if (old_ts & SPECULATIVE), then we are pretty sure that the insn
7624      has a speculative pattern.
7625 
7626      We can't assert (!(new_ts & HARD_DEP) || new_ts == old_ts) here because
7627      control-speculative NEXT could have been discarded by sched-rgn.c
7628      (the same case as when discarded by can_schedule_ready_p ()).  */
7629 
7630   if ((new_ts & SPECULATIVE)
7631       /* If (old_ts == new_ts), then (old_ts & SPECULATIVE) and we don't
7632 	 need to change anything.  */
7633       && new_ts != old_ts)
7634     {
7635       int res;
7636       rtx new_pat;
7637 
7638       gcc_assert ((new_ts & SPECULATIVE) && !(new_ts & ~SPECULATIVE));
7639 
7640       res = haifa_speculate_insn (next, new_ts, &new_pat);
7641 
7642       switch (res)
7643 	{
7644 	case -1:
7645 	  /* It would be nice to change DEP_STATUS of all dependences,
7646 	     which have ((DEP_STATUS & SPECULATIVE) == new_ts) to HARD_DEP,
7647 	     so we won't reanalyze anything.  */
7648 	  new_ts = HARD_DEP;
7649 	  break;
7650 
7651 	case 0:
7652 	  /* We follow the rule, that every speculative insn
7653 	     has non-null ORIG_PAT.  */
7654 	  if (!ORIG_PAT (next))
7655 	    ORIG_PAT (next) = PATTERN (next);
7656 	  break;
7657 
7658 	case 1:
7659 	  if (!ORIG_PAT (next))
7660 	    /* If we are going to overwrite the original pattern of the insn,
7661 	       save it.  */
7662 	    ORIG_PAT (next) = PATTERN (next);
7663 
7664 	  res = haifa_change_pattern (next, new_pat);
7665 	  gcc_assert (res);
7666 	  break;
7667 
7668 	default:
7669 	  gcc_unreachable ();
7670 	}
7671     }
7672 
7673   /* We need to restore pattern only if (new_ts == 0), because otherwise it is
7674      either correct (new_ts & SPECULATIVE),
7675      or we simply don't care (new_ts & HARD_DEP).  */
7676 
7677   gcc_assert (!ORIG_PAT (next)
7678 	      || !IS_SPECULATION_BRANCHY_CHECK_P (next));
7679 
7680   TODO_SPEC (next) = new_ts;
7681 
7682   if (new_ts & (HARD_DEP | DEP_POSTPONED))
7683     {
7684       /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
7685 	 control-speculative NEXT could have been discarded by sched-rgn.c
7686 	 (the same case as when discarded by can_schedule_ready_p ()).  */
7687       /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
7688 
7689       change_queue_index (next, QUEUE_NOWHERE);
7690 
7691       return -1;
7692     }
7693   else if (!(new_ts & BEGIN_SPEC)
7694 	   && ORIG_PAT (next) && PREDICATED_PAT (next) == NULL_RTX
7695 	   && !IS_SPECULATION_CHECK_P (next))
7696     /* We should change the pattern of every previously speculative
7697        instruction - and we determine whether NEXT was speculative by
7698        using the ORIG_PAT field.  Except in one case - speculation
7699        checks have ORIG_PAT set too, so skip them.  */
7700     {
7701       bool success = haifa_change_pattern (next, ORIG_PAT (next));
7702       gcc_assert (success);
7703       ORIG_PAT (next) = 0;
7704     }
7705 
7706   if (sched_verbose >= 2)
7707     {
7708       fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
7709                (*current_sched_info->print_insn) (next, 0));
7710 
7711       if (spec_info && spec_info->dump)
7712         {
7713           if (new_ts & BEGIN_DATA)
7714             fprintf (spec_info->dump, "; data-spec;");
7715           if (new_ts & BEGIN_CONTROL)
7716             fprintf (spec_info->dump, "; control-spec;");
7717           if (new_ts & BE_IN_CONTROL)
7718             fprintf (spec_info->dump, "; in-control-spec;");
7719         }
7720       if (TODO_SPEC (next) & DEP_CONTROL)
7721 	fprintf (sched_dump, " predicated");
7722       fprintf (sched_dump, "\n");
7723     }
7724 
7725   adjust_priority (next);
7726 
7727   return fix_tick_ready (next);
7728 }
7729 
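/* Editor's note: a hedged sketch of the contract documented above;
   callers are free to ignore the value:

     switch (try_ready (next))
       {
       case -1: break;   // not ready yet; on neither list
       case 0:  break;   // added to the ready list
       default: break;   // queued for that many cycles
       }  */
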
7730 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list.  */
7731 static int
7732 fix_tick_ready (rtx_insn *next)
7733 {
7734   int tick, delay;
7735 
7736   if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
7737     {
7738       int full_p;
7739       sd_iterator_def sd_it;
7740       dep_t dep;
7741 
7742       tick = INSN_TICK (next);
7743       /* If TICK is not equal to INVALID_TICK, then update
7744 	 INSN_TICK of NEXT with the most recent resolved dependence
7745 	 cost.  Otherwise, recalculate from scratch.  */
7746       full_p = (tick == INVALID_TICK);
7747 
7748       FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
7749         {
7750           rtx_insn *pro = DEP_PRO (dep);
7751           int tick1;
7752 
7753 	  gcc_assert (INSN_TICK (pro) >= MIN_TICK);
7754 
7755           tick1 = INSN_TICK (pro) + dep_cost (dep);
7756           if (tick1 > tick)
7757             tick = tick1;
7758 
7759 	  if (!full_p)
7760 	    break;
7761         }
7762     }
7763   else
7764     tick = -1;
7765 
7766   INSN_TICK (next) = tick;
7767 
7768   delay = tick - clock_var;
7769   if (delay <= 0 || sched_pressure != SCHED_PRESSURE_NONE || sched_fusion)
7770     delay = QUEUE_READY;
7771 
7772   change_queue_index (next, delay);
7773 
7774   return delay;
7775 }
7776 
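/* Editor's note: a numeric example of the computation above, with
   invented values and assuming a full recalculation (INSN_TICK was
   INVALID_TICK).  With resolved back dependencies on P1 (INSN_TICK == 4,
   dep_cost == 2) and P2 (INSN_TICK == 5, dep_cost == 0), tick ==
   max (6, 5) == 6; with clock_var == 4 the delay is 2 and NEXT is queued
   for two cycles, while a delay <= 0 (or pressure/fusion scheduling)
   would send it straight to the ready list.  */
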
7777 /* Move NEXT to the proper queue list with (DELAY >= 1),
7778    or add it to the ready list (DELAY == QUEUE_READY),
7779    or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE).  */
7780 static void
7781 change_queue_index (rtx_insn *next, int delay)
7782 {
7783   int i = QUEUE_INDEX (next);
7784 
7785   gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
7786 	      && delay != 0);
7787   gcc_assert (i != QUEUE_SCHEDULED);
7788 
7789   if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
7790       || (delay < 0 && delay == i))
7791     /* We have nothing to do.  */
7792     return;
7793 
7794   /* Remove NEXT from wherever it is now.  */
7795   if (i == QUEUE_READY)
7796     ready_remove_insn (next);
7797   else if (i >= 0)
7798     queue_remove (next);
7799 
7800   /* Add it to the proper place.  */
7801   if (delay == QUEUE_READY)
7802     ready_add (readyp, next, false);
7803   else if (delay >= 1)
7804     queue_insn (next, delay, "change queue index");
7805 
7806   if (sched_verbose >= 2)
7807     {
7808       fprintf (sched_dump, ";;\t\ttick updated: insn %s",
7809 	       (*current_sched_info->print_insn) (next, 0));
7810 
7811       if (delay == QUEUE_READY)
7812 	fprintf (sched_dump, " into ready\n");
7813       else if (delay >= 1)
7814 	fprintf (sched_dump, " into queue with cost=%d\n", delay);
7815       else
7816 	fprintf (sched_dump, " removed from ready or queue lists\n");
7817     }
7818 }
7819 
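/* Editor's note: an illustrative mapping of the DELAY argument above,
   using the constants referenced in this file:

     change_queue_index (insn, QUEUE_READY);    // add to the ready list
     change_queue_index (insn, 3);              // queue for three cycles
     change_queue_index (insn, QUEUE_NOWHERE);  // drop from both lists  */
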
7820 static int sched_ready_n_insns = -1;
7821 
7822 /* Initialize per region data structures.  */
7823 void
7824 sched_extend_ready_list (int new_sched_ready_n_insns)
7825 {
7826   int i;
7827 
7828   if (sched_ready_n_insns == -1)
7829     /* At the first call we need to initialize one more choice_stack
7830        entry.  */
7831     {
7832       i = 0;
7833       sched_ready_n_insns = 0;
7834       scheduled_insns.reserve (new_sched_ready_n_insns);
7835     }
7836   else
7837     i = sched_ready_n_insns + 1;
7838 
7839   ready.veclen = new_sched_ready_n_insns + issue_rate;
7840   ready.vec = XRESIZEVEC (rtx_insn *, ready.vec, ready.veclen);
7841 
7842   gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
7843 
7844   ready_try = (signed char *) xrecalloc (ready_try, new_sched_ready_n_insns,
7845 					 sched_ready_n_insns,
7846 					 sizeof (*ready_try));
7847 
7848   /* We allocate +1 element to save initial state in the choice_stack[0]
7849      entry.  */
7850   choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
7851 			     new_sched_ready_n_insns + 1);
7852 
7853   for (; i <= new_sched_ready_n_insns; i++)
7854     {
7855       choice_stack[i].state = xmalloc (dfa_state_size);
7856 
7857       if (targetm.sched.first_cycle_multipass_init)
7858 	targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
7859 						    .target_data));
7860     }
7861 
7862   sched_ready_n_insns = new_sched_ready_n_insns;
7863 }
7864 
7865 /* Free per region data structures.  */
7866 void
7867 sched_finish_ready_list (void)
7868 {
7869   int i;
7870 
7871   free (ready.vec);
7872   ready.vec = NULL;
7873   ready.veclen = 0;
7874 
7875   free (ready_try);
7876   ready_try = NULL;
7877 
7878   for (i = 0; i <= sched_ready_n_insns; i++)
7879     {
7880       if (targetm.sched.first_cycle_multipass_fini)
7881 	targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
7882 						    .target_data));
7883 
7884       free (choice_stack [i].state);
7885     }
7886   free (choice_stack);
7887   choice_stack = NULL;
7888 
7889   sched_ready_n_insns = -1;
7890 }
7891 
7892 static int
7893 haifa_luid_for_non_insn (rtx x)
7894 {
7895   gcc_assert (NOTE_P (x) || LABEL_P (x));
7896 
7897   return 0;
7898 }
7899 
7900 /* Generates recovery code for INSN.  */
7901 static void
7902 generate_recovery_code (rtx_insn *insn)
7903 {
7904   if (TODO_SPEC (insn) & BEGIN_SPEC)
7905     begin_speculative_block (insn);
7906 
7907   /* Here we have an insn with no dependencies on
7908      instructions other than CHECK_SPEC ones.  */
7909 
7910   if (TODO_SPEC (insn) & BE_IN_SPEC)
7911     add_to_speculative_block (insn);
7912 }
7913 
7914 /* Helper function.
7915    Tries to add speculative dependencies of type FS between TWIN and the
7916    consumers of INSN's forward dependencies.  */
7917 static void
7918 process_insn_forw_deps_be_in_spec (rtx_insn *insn, rtx_insn *twin, ds_t fs)
7919 {
7920   sd_iterator_def sd_it;
7921   dep_t dep;
7922 
7923   FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
7924     {
7925       ds_t ds;
7926       rtx_insn *consumer;
7927 
7928       consumer = DEP_CON (dep);
7929 
7930       ds = DEP_STATUS (dep);
7931 
7932       if (/* If we want to create speculative dep.  */
7933 	  fs
7934 	  /* And we can do that because this is a true dep.  */
7935 	  && (ds & DEP_TYPES) == DEP_TRUE)
7936 	{
7937 	  gcc_assert (!(ds & BE_IN_SPEC));
7938 
7939 	  if (/* If this dep can be overcome with 'begin speculation'.  */
7940 	      ds & BEGIN_SPEC)
7941 	    /* Then we have a choice: keep the dep 'begin speculative'
7942 	       or transform it into 'be in speculative'.  */
7943 	    {
7944 	      if (/* In try_ready we assert that once an insn has become
7945 		     ready it can be removed from the ready (or queue) list
7946 		     only due to a backend decision.  Hence we can't let the
7947 		     probability of the speculative dep decrease.  */
7948 		  ds_weak (ds) <= ds_weak (fs))
7949 		{
7950 		  ds_t new_ds;
7951 
7952 		  new_ds = (ds & ~BEGIN_SPEC) | fs;
7953 
7954 		  if (/* consumer can 'be in speculative'.  */
7955 		      sched_insn_is_legitimate_for_speculation_p (consumer,
7956 								  new_ds))
7957 		    /* Transform it to be in speculative.  */
7958 		    ds = new_ds;
7959 		}
7960 	    }
7961 	  else
7962 	    /* Mark the dep as 'be in speculative'.  */
7963 	    ds |= fs;
7964 	}
7965 
7966       {
7967 	dep_def _new_dep, *new_dep = &_new_dep;
7968 
7969 	init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
7970 	sd_add_dep (new_dep, false);
7971       }
7972     }
7973 }
7974 
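/* Editor's note: an illustrative transformation performed above.  For a
   true dependence with ds == BEGIN_DATA and fs == BE_IN_DATA, the status
   becomes (ds & ~BEGIN_SPEC) | fs - i.e. the dep moves from "begin
   speculative" to "be in speculative" - but only when ds_weak (ds) <=
   ds_weak (fs) and the consumer passes
   sched_insn_is_legitimate_for_speculation_p.  */
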
7975 /* Generates recovery code for BEGIN speculative INSN.  */
7976 static void
7977 begin_speculative_block (rtx_insn *insn)
7978 {
7979   if (TODO_SPEC (insn) & BEGIN_DATA)
7980     nr_begin_data++;
7981   if (TODO_SPEC (insn) & BEGIN_CONTROL)
7982     nr_begin_control++;
7983 
7984   create_check_block_twin (insn, false);
7985 
7986   TODO_SPEC (insn) &= ~BEGIN_SPEC;
7987 }
7988 
7989 static void haifa_init_insn (rtx_insn *);
7990 
7991 /* Generates recovery code for BE_IN speculative INSN.  */
7992 static void
add_to_speculative_block(rtx_insn * insn)7993 add_to_speculative_block (rtx_insn *insn)
7994 {
7995   ds_t ts;
7996   sd_iterator_def sd_it;
7997   dep_t dep;
7998   rtx_insn_list *twins = NULL;
7999   rtx_vec_t priorities_roots;
8000 
8001   ts = TODO_SPEC (insn);
8002   gcc_assert (!(ts & ~BE_IN_SPEC));
8003 
8004   if (ts & BE_IN_DATA)
8005     nr_be_in_data++;
8006   if (ts & BE_IN_CONTROL)
8007     nr_be_in_control++;
8008 
8009   TODO_SPEC (insn) &= ~BE_IN_SPEC;
8010   gcc_assert (!TODO_SPEC (insn));
8011 
8012   DONE_SPEC (insn) |= ts;
8013 
8014   /* First we convert all simple checks to branchy.  */
8015   for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
8016        sd_iterator_cond (&sd_it, &dep);)
8017     {
8018       rtx_insn *check = DEP_PRO (dep);
8019 
8020       if (IS_SPECULATION_SIMPLE_CHECK_P (check))
8021 	{
8022 	  create_check_block_twin (check, true);
8023 
8024 	  /* Restart search.  */
8025 	  sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
8026 	}
8027       else
8028 	/* Continue search.  */
8029 	sd_iterator_next (&sd_it);
8030     }
8031 
8032   priorities_roots.create (0);
8033   clear_priorities (insn, &priorities_roots);
8034 
8035   while (1)
8036     {
8037       rtx_insn *check, *twin;
8038       basic_block rec;
8039 
8040       /* Get the first backward dependency of INSN.  */
8041       sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
8042       if (!sd_iterator_cond (&sd_it, &dep))
8043 	/* INSN has no backward dependencies left.  */
8044 	break;
8045 
8046       gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
8047 		  && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
8048 		  && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
8049 
8050       check = DEP_PRO (dep);
8051 
8052       gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
8053 		  && QUEUE_INDEX (check) == QUEUE_NOWHERE);
8054 
8055       rec = BLOCK_FOR_INSN (check);
8056 
8057       twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
8058       haifa_init_insn (twin);
8059 
8060       sd_copy_back_deps (twin, insn, true);
8061 
8062       if (sched_verbose && spec_info->dump)
8063         /* INSN_BB (insn) isn't determined for twin insns yet.
8064            So we can't use current_sched_info->print_insn.  */
8065         fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
8066                  INSN_UID (twin), rec->index);
8067 
8068       twins = alloc_INSN_LIST (twin, twins);
8069 
8070       /* Add dependences between TWIN and all appropriate
8071 	 instructions from REC.  */
8072       FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
8073 	{
8074 	  rtx_insn *pro = DEP_PRO (dep);
8075 
8076 	  gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
8077 
8078 	  /* INSN might have dependencies from the instructions from
8079 	     several recovery blocks.  At this iteration we process those
8080 	     producers that reside in REC.  */
8081 	  if (BLOCK_FOR_INSN (pro) == rec)
8082 	    {
8083 	      dep_def _new_dep, *new_dep = &_new_dep;
8084 
8085 	      init_dep (new_dep, pro, twin, REG_DEP_TRUE);
8086 	      sd_add_dep (new_dep, false);
8087 	    }
8088 	}
8089 
8090       process_insn_forw_deps_be_in_spec (insn, twin, ts);
8091 
8092       /* Remove all dependencies between INSN and insns in REC.  */
8093       for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
8094 	   sd_iterator_cond (&sd_it, &dep);)
8095 	{
8096 	  rtx_insn *pro = DEP_PRO (dep);
8097 
8098 	  if (BLOCK_FOR_INSN (pro) == rec)
8099 	    sd_delete_dep (sd_it);
8100 	  else
8101 	    sd_iterator_next (&sd_it);
8102 	}
8103     }
8104 
8105   /* We couldn't have added the dependencies between INSN and TWINS earlier
8106      because that would make TWINS appear in the INSN_BACK_DEPS (INSN).  */
8107   while (twins)
8108     {
8109       rtx_insn *twin;
8110       rtx_insn_list *next_node;
8111 
8112       twin = twins->insn ();
8113 
8114       {
8115 	dep_def _new_dep, *new_dep = &_new_dep;
8116 
8117 	init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
8118 	sd_add_dep (new_dep, false);
8119       }
8120 
8121       next_node = twins->next ();
8122       free_INSN_LIST_node (twins);
8123       twins = next_node;
8124     }
8125 
8126   calc_priorities (priorities_roots);
8127   priorities_roots.release ();
8128 }
8129 
/* Extend the array pointed to by P, zero-filling only the new part.  */
void *
xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
{
  gcc_assert (new_nmemb >= old_nmemb);
  p = XRESIZEVAR (void, p, new_nmemb * size);
  memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
  return p;
}
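
/* A usage sketch for xrecalloc (guarded out, never compiled).  It
   substitutes plain realloc for the GCC-internal XRESIZEVAR and elides
   out-of-memory handling, but the contract is the same: old elements
   are preserved and only the newly added tail is zeroed.  */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main (void)
{
  int *a = (int *) calloc (4, sizeof (int));
  a[3] = 42;

  /* Grow from 4 to 8 elements the way xrecalloc does: realloc keeps
     a[0..3], memset clears a[4..7].  */
  a = (int *) realloc (a, 8 * sizeof (int));
  memset (a + 4, 0, 4 * sizeof (int));

  printf ("%d %d\n", a[3], a[7]);	/* Prints "42 0".  */
  free (a);
  return 0;
}
#endif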

/* Helper function.
   Find fallthru edge from PRED.  */
edge
find_fallthru_edge_from (basic_block pred)
{
  edge e;
  basic_block succ;

  succ = pred->next_bb;
  gcc_assert (succ->prev_bb == pred);

  if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
    {
      e = find_fallthru_edge (pred->succs);

      if (e)
	{
	  gcc_assert (e->dest == succ);
	  return e;
	}
    }
  else
    {
      e = find_fallthru_edge (succ->preds);

      if (e)
	{
	  gcc_assert (e->src == pred);
	  return e;
	}
    }

  return NULL;
}

/* Extend per basic block data structures.  */
static void
sched_extend_bb (void)
{
  /* The following is done to keep current_sched_info->next_tail non null.  */
  rtx_insn *end = BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
  rtx_insn *insn = DEBUG_INSN_P (end) ? prev_nondebug_insn (end) : end;
  if (NEXT_INSN (end) == 0
      || (!NOTE_P (insn)
	  && !LABEL_P (insn)
	  /* Don't emit a NOTE if it would end up before a BARRIER.  */
	  && !BARRIER_P (NEXT_INSN (end))))
    {
      rtx_note *note = emit_note_after (NOTE_INSN_DELETED, end);
      /* Make note appear outside BB.  */
      set_block_for_insn (note, NULL);
      BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb) = end;
    }
}

/* Init per basic block data structures.  */
void
sched_init_bbs (void)
{
  sched_extend_bb ();
}

/* Initialize BEFORE_RECOVERY variable.  */
static void
init_before_recovery (basic_block *before_recovery_ptr)
{
  basic_block last;
  edge e;

  last = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
  e = find_fallthru_edge_from (last);

  if (e)
    {
      /* We create two basic blocks:
         1. a single-instruction block that is inserted right after E->SRC
            and jumps to
         2. an empty block placed right before EXIT_BLOCK.
         Recovery blocks will be emitted between these two blocks.  */

      basic_block single, empty;

      /* If the fallthrough edge to exit we've found is from the block we've
	 created before, don't do anything more.  */
      if (last == after_recovery)
	return;

      adding_bb_to_current_region_p = false;

      single = sched_create_empty_bb (last);
      empty = sched_create_empty_bb (single);

      /* Add new blocks to the root loop.  */
      if (current_loops != NULL)
	{
	  add_bb_to_loop (single, (*current_loops->larray)[0]);
	  add_bb_to_loop (empty, (*current_loops->larray)[0]);
	}

      single->count = last->count;
      empty->count = last->count;
      single->frequency = last->frequency;
      empty->frequency = last->frequency;
      BB_COPY_PARTITION (single, last);
      BB_COPY_PARTITION (empty, last);

      redirect_edge_succ (e, single);
      make_single_succ_edge (single, empty, 0);
      make_single_succ_edge (empty, EXIT_BLOCK_PTR_FOR_FN (cfun),
			     EDGE_FALLTHRU);

      rtx_code_label *label = block_label (empty);
      rtx_jump_insn *x = emit_jump_insn_after (targetm.gen_jump (label),
					       BB_END (single));
      JUMP_LABEL (x) = label;
      LABEL_NUSES (label)++;
      haifa_init_insn (x);

      emit_barrier_after (x);

      sched_init_only_bb (empty, NULL);
      sched_init_only_bb (single, NULL);
      sched_extend_bb ();

      adding_bb_to_current_region_p = true;
      before_recovery = single;
      after_recovery = empty;

      if (before_recovery_ptr)
        *before_recovery_ptr = before_recovery;

      if (sched_verbose >= 2 && spec_info->dump)
        fprintf (spec_info->dump,
		 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
                 last->index, single->index, empty->index);
    }
  else
    before_recovery = last;
}

/* Returns new recovery block.  */
basic_block
sched_create_recovery_block (basic_block *before_recovery_ptr)
{
  rtx_insn *barrier;
  basic_block rec;

  haifa_recovery_bb_recently_added_p = true;
  haifa_recovery_bb_ever_added_p = true;

  init_before_recovery (before_recovery_ptr);

  barrier = get_last_bb_insn (before_recovery);
  gcc_assert (BARRIER_P (barrier));

  rtx_insn *label = emit_label_after (gen_label_rtx (), barrier);

  rec = create_basic_block (label, label, before_recovery);

  /* A recovery block always ends with an unconditional jump.  */
  emit_barrier_after (BB_END (rec));

  if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
    BB_SET_PARTITION (rec, BB_COLD_PARTITION);

  if (sched_verbose && spec_info->dump)
    fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
             rec->index);

  return rec;
}

/* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
   and emit necessary jumps.  */
void
sched_create_recovery_edges (basic_block first_bb, basic_block rec,
			     basic_block second_bb)
{
  int edge_flags;

  /* Fix the incoming edge.  */
  /* ??? Which other flags should be specified?  */
  if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
    /* Partition type is the same, if it is "unpartitioned".  */
    edge_flags = EDGE_CROSSING;
  else
    edge_flags = 0;

  make_edge (first_bb, rec, edge_flags);
  rtx_code_label *label = block_label (second_bb);
  rtx_jump_insn *jump = emit_jump_insn_after (targetm.gen_jump (label),
					      BB_END (rec));
  JUMP_LABEL (jump) = label;
  LABEL_NUSES (label)++;

  if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
    /* Partition type is the same, if it is "unpartitioned".  */
    {
      /* Rewritten from cfgrtl.c.  */
      if (flag_reorder_blocks_and_partition
	  && targetm_common.have_named_sections)
	{
	  /* We don't need the same note for the check because
	     any_condjump_p (check) == true.  */
	  CROSSING_JUMP_P (jump) = 1;
	}
      edge_flags = EDGE_CROSSING;
    }
  else
    edge_flags = 0;

  make_single_succ_edge (rec, second_bb, edge_flags);
  if (dom_info_available_p (CDI_DOMINATORS))
    set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
}

/* This function creates recovery code for INSN.  If MUTATE_P is nonzero,
   INSN is a simple check that should be converted to a branchy one.  */
static void
create_check_block_twin (rtx_insn *insn, bool mutate_p)
{
  basic_block rec;
  rtx_insn *label, *check, *twin;
  rtx check_pat;
  ds_t fs;
  sd_iterator_def sd_it;
  dep_t dep;
  dep_def _new_dep, *new_dep = &_new_dep;
  ds_t todo_spec;

  gcc_assert (ORIG_PAT (insn) != NULL_RTX);

  if (!mutate_p)
    todo_spec = TODO_SPEC (insn);
  else
    {
      gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
		  && (TODO_SPEC (insn) & SPECULATIVE) == 0);

      todo_spec = CHECK_SPEC (insn);
    }

  todo_spec &= SPECULATIVE;

  /* Create recovery block.  */
  if (mutate_p || targetm.sched.needs_block_p (todo_spec))
    {
      rec = sched_create_recovery_block (NULL);
      label = BB_HEAD (rec);
    }
  else
    {
      rec = EXIT_BLOCK_PTR_FOR_FN (cfun);
      label = NULL;
    }

  /* Emit CHECK.  */
  check_pat = targetm.sched.gen_spec_check (insn, label, todo_spec);

  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      /* To have mem_reg alive at the beginning of second_bb,
	 we emit check BEFORE insn, so insn after splitting
	 insn will be at the beginning of second_bb, which will
	 provide us with the correct life information.  */
      check = emit_jump_insn_before (check_pat, insn);
      JUMP_LABEL (check) = label;
      LABEL_NUSES (label)++;
    }
  else
    check = emit_insn_before (check_pat, insn);

  /* Extend data structures.  */
  haifa_init_insn (check);

  /* CHECK is being added to current region.  Extend ready list.  */
  gcc_assert (sched_ready_n_insns != -1);
  sched_extend_ready_list (sched_ready_n_insns + 1);

  if (current_sched_info->add_remove_insn)
    current_sched_info->add_remove_insn (insn, 0);

  RECOVERY_BLOCK (check) = rec;

  if (sched_verbose && spec_info->dump)
    fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
             (*current_sched_info->print_insn) (check, 0));

  gcc_assert (ORIG_PAT (insn));

  /* Initialize TWIN (twin is a duplicate of original instruction
     in the recovery block).  */
  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      sd_iterator_def sd_it;
      dep_t dep;

      FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
	if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
	  {
	    struct _dep _dep2, *dep2 = &_dep2;

	    init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);

	    sd_add_dep (dep2, true);
	  }

      twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
      haifa_init_insn (twin);

      if (sched_verbose && spec_info->dump)
	/* INSN_BB (insn) isn't determined for twin insns yet.
	   So we can't use current_sched_info->print_insn.  */
	fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
		 INSN_UID (twin), rec->index);
    }
  else
    {
      ORIG_PAT (check) = ORIG_PAT (insn);
      HAS_INTERNAL_DEP (check) = 1;
      twin = check;
      /* ??? We probably should change all OUTPUT dependencies to
	 (TRUE | OUTPUT).  */
    }

  /* Copy all resolved back dependencies of INSN to TWIN.  This will
     provide correct value for INSN_TICK (TWIN).  */
  sd_copy_back_deps (twin, insn, true);

  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    /* In case of branchy check, fix CFG.  */
    {
      basic_block first_bb, second_bb;
      rtx_insn *jump;

      first_bb = BLOCK_FOR_INSN (check);
      second_bb = sched_split_block (first_bb, check);

      sched_create_recovery_edges (first_bb, rec, second_bb);

      sched_init_only_bb (second_bb, first_bb);
      sched_init_only_bb (rec, EXIT_BLOCK_PTR_FOR_FN (cfun));

      jump = BB_END (rec);
      haifa_init_insn (jump);
    }

  /* Move backward dependences from INSN to CHECK and
     move forward dependences from INSN to TWIN.  */

  /* First, create dependencies between INSN's producers and CHECK & TWIN.  */
  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      ds_t ds;

      /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
	 check --TRUE--> producer  ??? or ANTI ???
	 twin  --TRUE--> producer
	 twin  --ANTI--> check

	 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
	 check --ANTI--> producer
	 twin  --ANTI--> producer
	 twin  --ANTI--> check

	 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
	 check ~~TRUE~~> producer
	 twin  ~~TRUE~~> producer
	 twin  --ANTI--> check  */

      ds = DEP_STATUS (dep);

      if (ds & BEGIN_SPEC)
	{
	  gcc_assert (!mutate_p);
	  ds &= ~BEGIN_SPEC;
	}

      init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
      sd_add_dep (new_dep, false);

      if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
	{
	  DEP_CON (new_dep) = twin;
	  sd_add_dep (new_dep, false);
	}
    }

  /* Second, remove backward dependencies of INSN.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    {
      if ((DEP_STATUS (dep) & BEGIN_SPEC)
	  || mutate_p)
	/* We can delete this dep because we overcome it with
	   BEGIN_SPECULATION.  */
	sd_delete_dep (sd_it);
      else
	sd_iterator_next (&sd_it);
    }

  /* Future Speculations.  Determine what BE_IN speculations will be like.  */
  fs = 0;

  /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
     here.  */

  gcc_assert (!DONE_SPEC (insn));

  if (!mutate_p)
    {
      ds_t ts = TODO_SPEC (insn);

      DONE_SPEC (insn) = ts & BEGIN_SPEC;
      CHECK_SPEC (check) = ts & BEGIN_SPEC;

      /* Luckiness of future speculations solely depends upon initial
	 BEGIN speculation.  */
      if (ts & BEGIN_DATA)
	fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
      if (ts & BEGIN_CONTROL)
	fs = set_dep_weak (fs, BE_IN_CONTROL,
			   get_dep_weak (ts, BEGIN_CONTROL));
    }
  else
    CHECK_SPEC (check) = CHECK_SPEC (insn);

  /* Future speculations: call the helper.  */
  process_insn_forw_deps_be_in_spec (insn, twin, fs);

  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      /* Which types of dependencies we should use here is, in general,
	 a machine-dependent question...  But, for now, it is not.  */

      if (!mutate_p)
	{
	  init_dep (new_dep, insn, check, REG_DEP_TRUE);
	  sd_add_dep (new_dep, false);

	  init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
	  sd_add_dep (new_dep, false);
	}
      else
	{
	  if (spec_info->dump)
	    fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
		     (*current_sched_info->print_insn) (insn, 0));

	  /* Remove all dependencies of the INSN.  */
	  {
	    sd_it = sd_iterator_start (insn, (SD_LIST_FORW
					      | SD_LIST_BACK
					      | SD_LIST_RES_BACK));
	    while (sd_iterator_cond (&sd_it, &dep))
	      sd_delete_dep (sd_it);
	  }

	  /* If the former check (INSN) was already moved to the ready (or
	     queue) list, add the new check (CHECK) there too.  */
	  if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
	    try_ready (check);

	  /* Remove the old check from the instruction stream and free its
	     data.  */
	  sched_remove_insn (insn);
	}

      init_dep (new_dep, check, twin, REG_DEP_ANTI);
      sd_add_dep (new_dep, false);
    }
  else
    {
      init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
      sd_add_dep (new_dep, false);
    }

  if (!mutate_p)
    /* Fix priorities.  If MUTATE_P is nonzero, this is not necessary,
       because it'll be done later in add_to_speculative_block.  */
    {
      rtx_vec_t priorities_roots = rtx_vec_t ();

      clear_priorities (twin, &priorities_roots);
      calc_priorities (priorities_roots);
      priorities_roots.release ();
    }
}

/* Remove dependencies between instructions in the recovery block REC
   and the usual region instructions.  Inner dependencies are kept so
   they won't have to be recomputed.  */
static void
fix_recovery_deps (basic_block rec)
{
  rtx_insn *note, *insn, *jump;
  rtx_insn_list *ready_list = 0;
  bitmap_head in_ready;
  rtx_insn_list *link;

  bitmap_initialize (&in_ready, 0);

  /* NOTE - a basic block note.  */
  note = NEXT_INSN (BB_HEAD (rec));
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  insn = BB_END (rec);
  gcc_assert (JUMP_P (insn));
  insn = PREV_INSN (insn);

  do
    {
      sd_iterator_def sd_it;
      dep_t dep;

      for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
	   sd_iterator_cond (&sd_it, &dep);)
	{
	  rtx_insn *consumer = DEP_CON (dep);

	  if (BLOCK_FOR_INSN (consumer) != rec)
	    {
	      sd_delete_dep (sd_it);

	      if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
		ready_list = alloc_INSN_LIST (consumer, ready_list);
	    }
	  else
	    {
	      gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);

	      sd_iterator_next (&sd_it);
	    }
	}

      insn = PREV_INSN (insn);
    }
  while (insn != note);

  bitmap_clear (&in_ready);

  /* Try to add instructions to the ready or queue list.  */
  for (link = ready_list; link; link = link->next ())
    try_ready (link->insn ());
  free_INSN_LIST_list (&ready_list);

  /* Fix the jump's dependences.  */
  insn = BB_HEAD (rec);
  jump = BB_END (rec);

  gcc_assert (LABEL_P (insn));
  insn = NEXT_INSN (insn);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
  add_jump_dependencies (insn, jump);
}

/* Change pattern of INSN to NEW_PAT.  Invalidate cached haifa
   instruction data.  */
static bool
haifa_change_pattern (rtx_insn *insn, rtx new_pat)
{
  int t;

  t = validate_change (insn, &PATTERN (insn), new_pat, 0);
  if (!t)
    return false;

  update_insn_after_change (insn);
  return true;
}

/* -1 - can't speculate,
   0 - for speculation with REQUEST mode it is OK to use
   current instruction pattern,
   1 - need to change pattern for *NEW_PAT to be speculative.  */
int
sched_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
{
  gcc_assert (current_sched_info->flags & DO_SPECULATION
	      && (request & SPECULATIVE)
	      && sched_insn_is_legitimate_for_speculation_p (insn, request));

  if ((request & spec_info->mask) != request)
    return -1;

  if (request & BE_IN_SPEC
      && !(request & BEGIN_SPEC))
    return 0;

  return targetm.sched.speculate_insn (insn, request, new_pat);
}
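
/* A hedged sketch (guarded out, never compiled) of how a caller might
   consume the three-way result above.  This exact helper does not
   exist; the real callers live elsewhere in the scheduler.  */
#if 0
static bool
try_speculate_sketch (rtx_insn *insn, ds_t request)
{
  rtx new_pat;

  switch (sched_speculate_insn (insn, request, &new_pat))
    {
    case -1:
      return false;	/* Cannot speculate this insn at all.  */
    case 0:
      return true;	/* The current pattern is already fine.  */
    default:
      /* The pattern must be replaced by the speculative variant.  */
      return haifa_change_pattern (insn, new_pat);
    }
}
#endif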

static int
haifa_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
{
  gcc_assert (sched_deps_info->generate_spec_deps
	      && !IS_SPECULATION_CHECK_P (insn));

  if (HAS_INTERNAL_DEP (insn)
      || SCHED_GROUP_P (insn))
    return -1;

  return sched_speculate_insn (insn, request, new_pat);
}

/* Print some information about block BB, which starts with HEAD and
   ends with TAIL, before scheduling it.
   I is zero if the scheduler is about to start with a fresh ebb.  */
static void
dump_new_block_header (int i, basic_block bb, rtx_insn *head, rtx_insn *tail)
{
  if (!i)
    fprintf (sched_dump,
	     ";;   ======================================================\n");
  else
    fprintf (sched_dump,
	     ";;   =====================ADVANCING TO=====================\n");
  fprintf (sched_dump,
	   ";;   -- basic block %d from %d to %d -- %s reload\n",
	   bb->index, INSN_UID (head), INSN_UID (tail),
	   (reload_completed ? "after" : "before"));
  fprintf (sched_dump,
	   ";;   ======================================================\n");
  fprintf (sched_dump, "\n");
}

/* Unlink basic block notes and labels and save them, so they
   can be easily restored.  We unlink basic block notes in EBBs to
   provide back-compatibility with the previous code, as target backends
   assume that there'll be only instructions between
   current_sched_info->{head and tail}.  We restore these notes as soon
   as we can.
   FIRST (LAST) is the first (last) basic block in the ebb.
   NB: In the usual case (FIRST == LAST) nothing is really done.  */
void
unlink_bb_notes (basic_block first, basic_block last)
{
  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  if (first == last)
    return;

  bb_header = XNEWVEC (rtx_insn *, last_basic_block_for_fn (cfun));

  /* Make a sentinel.  */
  if (last->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
    bb_header[last->next_bb->index] = 0;

  first = first->next_bb;
  do
    {
      rtx_insn *prev, *label, *note, *next;

      label = BB_HEAD (last);
      if (LABEL_P (label))
	note = NEXT_INSN (label);
      else
	note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      prev = PREV_INSN (label);
      next = NEXT_INSN (note);
      gcc_assert (prev && next);

      SET_NEXT_INSN (prev) = next;
      SET_PREV_INSN (next) = prev;

      bb_header[last->index] = label;

      if (last == first)
	break;

      last = last->prev_bb;
    }
  while (1);
}

/* Restore basic block notes.
   FIRST is the first basic block in the ebb.  */
static void
restore_bb_notes (basic_block first)
{
  if (!bb_header)
    return;

  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  first = first->next_bb;
  /* Remember: FIRST is actually the second basic block in the ebb.  */

  while (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
	 && bb_header[first->index])
    {
      rtx_insn *prev, *label, *note, *next;

      label = bb_header[first->index];
      prev = PREV_INSN (label);
      next = NEXT_INSN (prev);

      if (LABEL_P (label))
	note = NEXT_INSN (label);
      else
	note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      bb_header[first->index] = 0;

      SET_NEXT_INSN (prev) = label;
      SET_NEXT_INSN (note) = next;
      SET_PREV_INSN (next) = note;

      first = first->next_bb;
    }

  free (bb_header);
  bb_header = 0;
}
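
/* The pair unlink_bb_notes / restore_bb_notes is a splice-out /
   splice-in on the doubly linked insn chain: the unlinked run keeps
   its internal prev/next pointers, so relinking needs only the
   remembered head.  A standalone sketch on a toy list (guarded out,
   never compiled; all names hypothetical):  */
#if 0
#include <assert.h>

struct node { struct node *prev, *next; };

/* Detach the run FIRST..LAST; the run stays internally chained, just
   as the saved bb headers do in bb_header[].  */
static void
splice_out (struct node *first, struct node *last)
{
  first->prev->next = last->next;
  last->next->prev = first->prev;
}

/* Reattach the run FIRST..LAST, recovering the attachment points from
   the pointers still stored inside the run, like restore_bb_notes.  */
static void
splice_in (struct node *first, struct node *last)
{
  first->prev->next = first;
  last->next->prev = last;
}

int
main (void)
{
  struct node n[4];
  int i;

  for (i = 0; i < 4; i++)
    {
      n[i].prev = (i > 0) ? &n[i - 1] : 0;
      n[i].next = (i < 3) ? &n[i + 1] : 0;
    }

  splice_out (&n[1], &n[2]);
  assert (n[0].next == &n[3] && n[3].prev == &n[0]);

  splice_in (&n[1], &n[2]);
  assert (n[0].next == &n[1] && n[3].prev == &n[2]);
  return 0;
}
#endif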

/* Helper function.
   Fix CFG after both in- and inter-block movement of
   control_flow_insn_p JUMP.  */
static void
fix_jump_move (rtx_insn *jump)
{
  basic_block bb, jump_bb, jump_bb_next;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
	      || IS_SPECULATION_BRANCHY_CHECK_P (jump));

  if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
    /* If jump_bb_next is not empty.  */
    BB_END (jump_bb) = BB_END (jump_bb_next);

  if (BB_END (bb) != PREV_INSN (jump))
    /* Then there are instructions after jump that should be placed
       in jump_bb_next.  */
    BB_END (jump_bb_next) = BB_END (bb);
  else
    /* Otherwise jump_bb_next is empty.  */
    BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));

  /* To make the assertion in move_insn happy.  */
  BB_END (bb) = PREV_INSN (jump);

  update_bb_for_insn (jump_bb_next);
}

/* Fix CFG after interblock movement of control_flow_insn_p JUMP.  */
static void
move_block_after_check (rtx_insn *jump)
{
  basic_block bb, jump_bb, jump_bb_next;
  vec<edge, va_gc> *t;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  update_bb_for_insn (jump_bb);

  gcc_assert (IS_SPECULATION_CHECK_P (jump)
	      || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));

  unlink_block (jump_bb_next);
  link_block (jump_bb_next, bb);

  t = bb->succs;
  bb->succs = 0;
  move_succs (&(jump_bb->succs), bb);
  move_succs (&(jump_bb_next->succs), jump_bb);
  move_succs (&t, jump_bb_next);

  df_mark_solutions_dirty ();

  common_sched_info->fix_recovery_cfg
    (bb->index, jump_bb->index, jump_bb_next->index);
}

/* Helper function for move_block_after_check.
   This function attaches the edge vector pointed to by SUCCSP to
   block TO.  */
static void
move_succs (vec<edge, va_gc> **succsp, basic_block to)
{
  edge e;
  edge_iterator ei;

  gcc_assert (to->succs == 0);

  to->succs = *succsp;

  FOR_EACH_EDGE (e, ei, to->succs)
    e->src = to;

  *succsp = 0;
}
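
/* Ownership transfer with back-pointer fix-up, as a standalone sketch
   (guarded out, never compiled; toy types): moving a successor vector
   between blocks is not just a pointer swap, since every edge's src
   field must be retargeted, which is what the FOR_EACH_EDGE loop
   above does.  */
#if 0
#include <assert.h>
#include <stddef.h>

struct blk;
struct edge_sk { struct blk *src; };
struct blk { struct edge_sk **succs; int n_succs; };

static void
move_succs_sketch (struct blk *from, struct blk *to)
{
  int i;

  assert (to->succs == NULL);

  to->succs = from->succs;
  to->n_succs = from->n_succs;

  /* Retarget the back pointers to the new owner.  */
  for (i = 0; i < to->n_succs; i++)
    to->succs[i]->src = to;

  from->succs = NULL;
  from->n_succs = 0;
}

int
main (void)
{
  struct edge_sk e = { NULL };
  struct edge_sk *ev[1] = { &e };
  struct blk a = { ev, 1 }, b = { NULL, 0 };

  e.src = &a;
  move_succs_sketch (&a, &b);
  assert (b.succs[0]->src == &b && a.succs == NULL);
  return 0;
}
#endif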

/* Remove INSN from the instruction stream.
   INSN should not have any dependencies.  */
static void
sched_remove_insn (rtx_insn *insn)
{
  sd_finish_insn (insn);

  change_queue_index (insn, QUEUE_NOWHERE);
  current_sched_info->add_remove_insn (insn, 1);
  delete_insn (insn);
}

/* Clear priorities of all instructions that are forward dependent on INSN.
   Store in the vector pointed to by ROOTS_PTR the insns on which priority ()
   should be invoked to initialize all cleared priorities.  */
static void
clear_priorities (rtx_insn *insn, rtx_vec_t *roots_ptr)
{
  sd_iterator_def sd_it;
  dep_t dep;
  bool insn_is_root_p = true;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);

      if (INSN_PRIORITY_STATUS (pro) >= 0
	  && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
	{
	  /* If DEP doesn't contribute to priority then INSN itself should
	     be added to priority roots.  */
	  if (contributes_to_priority_p (dep))
	    insn_is_root_p = false;

	  INSN_PRIORITY_STATUS (pro) = -1;
	  clear_priorities (pro, roots_ptr);
	}
    }

  if (insn_is_root_p)
    roots_ptr->safe_push (insn);
}

/* Recompute priorities of instructions whose priorities might have been
   changed.  ROOTS is a vector of instructions whose priority computation will
   trigger initialization of all cleared priorities.  */
static void
calc_priorities (rtx_vec_t roots)
{
  int i;
  rtx_insn *insn;

  FOR_EACH_VEC_ELT (roots, i, insn)
    priority (insn);
}

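/* clear_priorities / calc_priorities above implement a classic
   memoization pattern: walk backward from a changed insn invalidating
   cached values, remember the roots of the invalidated region, then
   let one recursive recomputation refill everything.  A standalone
   sketch on a toy DAG (guarded out, never compiled; all names
   hypothetical):  */
#if 0
#include <stdio.h>

#define N 4

static int cached[N];		/* -1 means invalid, >= 0 is memoized.  */
static int succs[N][N];		/* succs[i][j] != 0: edge i -> j.  */

/* Priority of I: 1 + the maximum priority of its successors.  */
static int
prio (int i)
{
  int j, best = 0;

  if (cached[i] >= 0)
    return cached[i];

  for (j = 0; j < N; j++)
    if (succs[i][j] && prio (j) + 1 > best)
      best = prio (j) + 1;
  return cached[i] = best;
}

int
main (void)
{
  int i;

  /* Chain 0 -> 1 -> 2 -> 3.  */
  succs[0][1] = succs[1][2] = succs[2][3] = 1;

  for (i = 0; i < N; i++)
    cached[i] = -1;
  printf ("prio (0) = %d\n", prio (0));	/* 3 */

  /* Node 2 changed: invalidate it and everything that depends on it,
     as clear_priorities does, then recompute from the root.  */
  cached[2] = cached[1] = cached[0] = -1;
  printf ("prio (0) = %d\n", prio (0));	/* Recomputed: still 3.  */
  return 0;
}
#endif
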
/* Add dependences between JUMP and other instructions in the recovery
   block.  INSN is the first insn in the recovery block.  */
static void
add_jump_dependencies (rtx_insn *insn, rtx_insn *jump)
{
  do
    {
      insn = NEXT_INSN (insn);
      if (insn == jump)
	break;

      if (dep_list_size (insn, SD_LIST_FORW) == 0)
	{
	  dep_def _new_dep, *new_dep = &_new_dep;

	  init_dep (new_dep, insn, jump, REG_DEP_ANTI);
	  sd_add_dep (new_dep, false);
	}
    }
  while (1);

  gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
}
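
/* The loop above makes the jump the unique sink of the recovery
   block's dependence DAG: every insn with no forward dependence gets
   an anti-dependence to the jump, so nothing can be scheduled after
   it.  A standalone sketch over out-degree counts (guarded out, never
   compiled; hypothetical representation):  */
#if 0
#include <stdio.h>

int
main (void)
{
  /* out_deg[i] is the number of forward dependences of insn I; the
     jump itself is not in the array.  */
  int out_deg[4] = { 2, 0, 1, 0 };
  int i;

  for (i = 0; i < 4; i++)
    if (out_deg[i] == 0)
      {
	out_deg[i] = 1;			/* New anti-dep I -> jump.  */
	printf ("dep %d -> jump\n", i);	/* Prints for 1 and 3.  */
      }
  return 0;
}
#endif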

/* Extend data structures for logical insn UID.  */
void
sched_extend_luids (void)
{
  int new_luids_max_uid = get_max_uid () + 1;

  sched_luids.safe_grow_cleared (new_luids_max_uid);
}

/* Initialize LUID for INSN.  */
void
sched_init_insn_luid (rtx_insn *insn)
{
  int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
  int luid;

  if (i >= 0)
    {
      luid = sched_max_luid;
      sched_max_luid += i;
    }
  else
    luid = -1;

  SET_INSN_LUID (insn, luid);
}
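
/* The allocation rule above is subtle: an insn consumes one luid,
   while a note or label consumes zero and therefore shares the luid of
   the next real insn; a negative request yields no luid at all.  A
   standalone sketch of the counter (guarded out, never compiled; toy
   code):  */
#if 0
#include <stdio.h>

static int max_luid = 1;

/* WIDTH is 1 for an insn, 0 for a note/label, -1 for "no luid".  */
static int
assign_luid (int width)
{
  int luid = (width >= 0) ? max_luid : -1;

  if (width >= 0)
    max_luid += width;
  return luid;
}

int
main (void)
{
  printf ("%d\n", assign_luid (0));	/* Note: luid 1 (shared).  */
  printf ("%d\n", assign_luid (1));	/* Insn: luid 1, counter -> 2.  */
  printf ("%d\n", assign_luid (1));	/* Insn: luid 2, counter -> 3.  */
  printf ("%d\n", assign_luid (-1));	/* No luid: -1.  */
  return 0;
}
#endif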

/* Initialize luids for BBS.
   The hook common_sched_info->luid_for_non_insn () is used to determine
   if notes, labels, etc. need luids.  */
void
sched_init_luids (bb_vec_t bbs)
{
  int i;
  basic_block bb;

  sched_extend_luids ();
  FOR_EACH_VEC_ELT (bbs, i, bb)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
	sched_init_insn_luid (insn);
    }
}

/* Free LUIDs.  */
void
sched_finish_luids (void)
{
  sched_luids.release ();
  sched_max_luid = 1;
}

/* Return logical uid of INSN.  Helpful while debugging.  */
int
insn_luid (rtx_insn *insn)
{
  return INSN_LUID (insn);
}

/* Extend per insn data in the target.  */
void
sched_extend_target (void)
{
  if (targetm.sched.h_i_d_extended)
    targetm.sched.h_i_d_extended ();
}

/* Extend global scheduler structures (those that live across calls to
   schedule_block) to include information about just emitted INSN.  */
static void
extend_h_i_d (void)
{
  int reserve = (get_max_uid () + 1 - h_i_d.length ());
  if (reserve > 0
      && ! h_i_d.space (reserve))
    {
      h_i_d.safe_grow_cleared (3 * get_max_uid () / 2);
      sched_extend_target ();
    }
}
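
/* extend_h_i_d grows the table geometrically (to 3/2 of the current
   maximum uid), so repeated one-insn extensions stay amortized O(1).
   A standalone sketch of the same policy (guarded out, never compiled;
   hypothetical names; error checking elided):  */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int *table;
static size_t table_len;	/* Number of allocated entries.  */

static void
ensure_capacity (size_t needed)
{
  size_t new_len;

  if (needed <= table_len)
    return;

  /* Over-allocate to 3/2 of the request and zero-fill the new tail,
     mirroring safe_grow_cleared (3 * get_max_uid () / 2) above.  */
  new_len = 3 * needed / 2;
  table = (int *) realloc (table, new_len * sizeof (int));
  memset (table + table_len, 0, (new_len - table_len) * sizeof (int));
  table_len = new_len;
}

int
main (void)
{
  size_t uid;

  for (uid = 1; uid <= 100; uid++)
    ensure_capacity (uid + 1);
  printf ("capacity after 100 uids: %zu\n", table_len);
  free (table);
  return 0;
}
#endif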

/* Initialize the h_i_d entry of INSN with default values.
   Values that are not explicitly initialized here hold zero.  */
static void
init_h_i_d (rtx_insn *insn)
{
  if (INSN_LUID (insn) > 0)
    {
      INSN_COST (insn) = -1;
      QUEUE_INDEX (insn) = QUEUE_NOWHERE;
      INSN_TICK (insn) = INVALID_TICK;
      INSN_EXACT_TICK (insn) = INVALID_TICK;
      INTER_TICK (insn) = INVALID_TICK;
      TODO_SPEC (insn) = HARD_DEP;
      INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status
	= AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
      INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status
	= AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
    }
}

/* Initialize haifa_insn_data for BBS.  */
void
haifa_init_h_i_d (bb_vec_t bbs)
{
  int i;
  basic_block bb;

  extend_h_i_d ();
  FOR_EACH_VEC_ELT (bbs, i, bb)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
	init_h_i_d (insn);
    }
}

/* Finalize haifa_insn_data.  */
void
haifa_finish_h_i_d (void)
{
  int i;
  haifa_insn_data_t data;
  reg_use_data *use, *next_use;
  reg_set_data *set, *next_set;

  FOR_EACH_VEC_ELT (h_i_d, i, data)
    {
      free (data->max_reg_pressure);
      free (data->reg_pressure);
      for (use = data->reg_use_list; use != NULL; use = next_use)
	{
	  next_use = use->next_insn_use;
	  free (use);
	}
      for (set = data->reg_set_list; set != NULL; set = next_set)
	{
	  next_set = set->next_insn_set;
	  free (set);
	}
    }
  h_i_d.release ();
}

/* Init data for the new insn INSN.  */
static void
haifa_init_insn (rtx_insn *insn)
{
  gcc_assert (insn != NULL);

  sched_extend_luids ();
  sched_init_insn_luid (insn);
  sched_extend_target ();
  sched_deps_init (false);
  extend_h_i_d ();
  init_h_i_d (insn);

  if (adding_bb_to_current_region_p)
    {
      sd_init_insn (insn);

      /* Extend dependency caches by one element.  */
      extend_dependency_caches (1, false);
    }
  if (sched_pressure != SCHED_PRESSURE_NONE)
    init_insn_reg_pressure_info (insn);
}

/* Init data for the new basic block BB which comes after AFTER.  */
static void
haifa_init_only_bb (basic_block bb, basic_block after)
{
  gcc_assert (bb != NULL);

  sched_init_bbs ();

  if (common_sched_info->add_block)
    /* This changes only data structures of the front-end.  */
    common_sched_info->add_block (bb, after);
}

/* A generic version of sched_split_block ().  */
basic_block
sched_split_block_1 (basic_block first_bb, rtx after)
{
  edge e;

  e = split_block (first_bb, after);
  gcc_assert (e->src == first_bb);

  /* sched_split_block emits a note if *check == BB_END.  Probably it
     is better to rip that note off.  */

  return e->dest;
}

/* A generic version of sched_create_empty_bb ().  */
basic_block
sched_create_empty_bb_1 (basic_block after)
{
  return create_empty_bb (after);
}

/* Insert PAT as an INSN into the schedule and update the necessary data
   structures to account for it.  */
rtx_insn *
sched_emit_insn (rtx pat)
{
  rtx_insn *insn = emit_insn_before (pat, first_nonscheduled_insn ());
  haifa_init_insn (insn);

  if (current_sched_info->add_remove_insn)
    current_sched_info->add_remove_insn (insn, 0);

  (*current_sched_info->begin_schedule_ready) (insn);
  scheduled_insns.safe_push (insn);

  last_scheduled_insn = insn;
  return insn;
}

/* This function returns a candidate satisfying dispatch constraints from
   the ready list.  */

static rtx_insn *
ready_remove_first_dispatch (struct ready_list *ready)
{
  int i;
  rtx_insn *insn = ready_element (ready, 0);

  if (ready->n_ready == 1
      || !INSN_P (insn)
      || INSN_CODE (insn) < 0
      || !active_insn_p (insn)
      || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
    return ready_remove_first (ready);

  for (i = 1; i < ready->n_ready; i++)
    {
      insn = ready_element (ready, i);

      if (!INSN_P (insn)
	  || INSN_CODE (insn) < 0
	  || !active_insn_p (insn))
	continue;

      if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
	{
	  /* Return the I-th element of ready.  */
	  insn = ready_remove (ready, i);
	  return insn;
	}
    }

  if (targetm.sched.dispatch (NULL, DISPATCH_VIOLATION))
    return ready_remove_first (ready);

  for (i = 1; i < ready->n_ready; i++)
    {
      insn = ready_element (ready, i);

      if (!INSN_P (insn)
	  || INSN_CODE (insn) < 0
	  || !active_insn_p (insn))
	continue;

      /* Return the I-th element of ready.  */
      if (targetm.sched.dispatch (insn, IS_CMP))
	return ready_remove (ready, i);
    }

  return ready_remove_first (ready);
}
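
/* The selection policy above is "first element satisfying the target
   predicate, else fall back to the head".  A standalone sketch over a
   plain array (guarded out, never compiled); fits_window stands in
   for targetm.sched.dispatch and is entirely hypothetical:  */
#if 0
#include <stdio.h>

static int
fits_window (int x)
{
  return x % 2 == 0;	/* Pretend even numbers fit the window.  */
}

/* Return the index to remove: the first fitting element, else 0.  */
static int
pick_index (const int *ready, int n)
{
  int i;

  if (n == 1 || fits_window (ready[0]))
    return 0;
  for (i = 1; i < n; i++)
    if (fits_window (ready[i]))
      return i;
  return 0;		/* Nothing fits: fall back to the head.  */
}

int
main (void)
{
  int ready[] = { 3, 5, 8, 9 };

  printf ("pick %d\n", pick_index (ready, 4));	/* Picks index 2.  */
  return 0;
}
#endif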

/* Return the number of ready insns in the ready list.  */

int
number_in_ready (void)
{
  return ready.n_ready;
}

/* Return the I-th element of the ready list.  */

rtx_insn *
get_ready_element (int i)
{
  return ready_element (&ready, i);
}

#endif /* INSN_SCHEDULING */