/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for the normal
   instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block motion over interblock motion, ties broken by
   4.  prefer useful motion over speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, ties broken by
   7.  choose the insn which has the most insns dependent on it, or finally
   8.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To a first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_backward_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using LOG_LINKS backward
   dependences.  LOG_LINKS are translated into INSN_DEPEND forward
   dependences for the purpose of forward list scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is OK,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one in which
   to perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live register constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
#include "target.h"
#include "output.h"
#include "params.h"

#ifdef INSN_SCHEDULING

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */

static int issue_rate;

/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N>0 and no -DSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=1: same as -dSR.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */

static int sched_verbose_param = 0;
int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dRS).  */
FILE *sched_dump = 0;

/* Highest uid before scheduling.  */
static int old_max_uid;

/* fix_sched_param() is called from toplev.c upon detection
   of the -fsched-verbose=N option.  */

void
fix_sched_param (const char *param, const char *val)
{
  if (!strcmp (param, "verbose"))
    sched_verbose_param = atoi (val);
  else
    warning (0, "fix_sched_param: unknown param: %s", param);
}

struct haifa_insn_data *h_i_d;

#define LINE_NOTE(INSN)		(h_i_d[INSN_UID (INSN)].line_note)
#define INSN_TICK(INSN)		(h_i_d[INSN_UID (INSN)].tick)
#define INTER_TICK(INSN)        (h_i_d[INSN_UID (INSN)].inter_tick)

/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)

/* Issue points are used to distinguish between instructions in max_issue ().
   For now, all instructions are equally good.  */
#define ISSUE_POINTS(INSN) 1

/* Vector indexed by basic block number giving the starting line-number
   for each basic block.  */
static rtx *line_note_head;

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
static spec_info_t spec_info;

/* True if a recovery block was added during scheduling of the current
   block.  Used to determine if we need to fix INSN_TICKs.  */
static bool added_recovery_block_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Pointers to GLAT data.  See init_glat for more information.  */
regset *glat_start, *glat_end;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx *bb_header = 0;

/* Number of basic_blocks.  */
static int old_last_basic_block;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;

/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) consists of the insns in the INSN_DEPEND of
   the unscheduled insns, i.e., those that are ready, queued, and
   pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */

/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal time of instruction execution computed by genattr.c
   on the basis of the maximal time of functional unit reservations and
   of getting a result.  This is the longest time an insn may be
   queued.  */

static rtx *insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
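
/* Worked example (editor's illustrative note, assuming
   max_insn_queue_index == 7): queuing an insn 3 cycles ahead of
   q_ptr == 6 wraps around to slot 1, since
   NEXT_Q_AFTER (6, 3) == (6 + 3) & 7 == 1.  The masking works only
   because max_insn_queue_index is a power of two minus one.  */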

#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in the
   queue nor in the ready list.
   QUEUE_READY     - INSN is in the ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */

#define QUEUE_INDEX(INSN) (h_i_d[INSN_UID (INSN)].queue_index)

/* The following variable describes all current and future
   reservations of the processor units.  */
state_t curr_state;

/* The following variable holds the size of the memory representing
   all current and future reservations of the processor units.  */
static size_t dfa_state_size;

/* The following array is used to find the best insn from the ready
   list when the automaton pipeline interface is used.  */
static char *ready_try;

/* Describe the ready list of the scheduler.
   VEC holds space enough for all insns in the current region.  VECLEN
   says how many exactly.
   FIRST is the index of the element with the highest priority; i.e. the
   last one in the ready list, since elements are ordered by ascending
   priority.
   N_READY determines how many insns are on the ready list.  */

struct ready_list
{
  rtx *vec;
  int veclen;
  int first;
  int n_ready;
};
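
/* Layout example (editor's illustrative note): with veclen == 8,
   first == 7 and n_ready == 3, the ready insns occupy vec[5..7];
   ready_lastpos () returns &vec[5] (the lowest-priority insn) and
   vec[7] holds the highest-priority insn, the one that
   ready_remove_first () returns.  */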

/* The pointer to the ready list.  */
static struct ready_list *readyp;

/* Scheduling clock.  */
static int clock_var;

/* Number of instructions in current scheduling region.  */
static int rgn_n_insns;

static int may_trap_exp (rtx, int);

/* Nonzero iff the address is composed of at most 1 register.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)					\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))
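
/* For instance (editor's illustrative note): (reg R) and
   (plus (reg R) (const_int 4)) both satisfy CONST_BASED_ADDRESS_P,
   while (plus (reg R1) (reg R2)) does not, since neither operand of
   the PLUS is a constant.  */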

/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  */

static int
may_trap_exp (rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
	return TRAP_RISKY;
      else
	return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory:  a volatile load.  */
      if (MEM_VOLATILE_P (x))
	return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
	return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
	return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
	return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    {
	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
	      insn_class = WORST_CLASS (insn_class, tmp_class);
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = 0; j < XVECLEN (x, i); j++)
		{
		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
		  insn_class = WORST_CLASS (insn_class, tmp_class);
		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
		    break;
		}
	    }
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
      return insn_class;
    }
}
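
/* Example (editor's illustrative note): for a load such as
   (mem (plus (reg R) (const_int 8))) that may_trap_p () cannot prove
   safe, the address is based on a single register, so may_trap_exp ()
   returns PFREE_CANDIDATE rather than PRISKY_CANDIDATE; interblock
   scheduling checks such candidates further before moving them
   speculatively.  */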

/* Classifies insn for the purpose of verifying that it can be
   moved speculatively, by examining its patterns, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
   being either PFREE or PRISKY.  */

int
haifa_classify_insn (rtx insn)
{
  rtx pat = PATTERN (insn);
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i, len = XVECLEN (pat, 0);

      for (i = len - 1; i >= 0; i--)
	{
	  code = GET_CODE (XVECEXP (pat, 0, i));
	  switch (code)
	    {
	    case CLOBBER:
	      /* Test if it is a 'store'.  */
	      tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
	      break;
	    case SET:
	      /* Test if it is a store.  */
	      tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
	      if (tmp_class == TRAP_RISKY)
		break;
	      /* Test if it is a load.  */
	      tmp_class
		= WORST_CLASS (tmp_class,
			       may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
					     0));
	      break;
	    case COND_EXEC:
	    case TRAP_IF:
	      tmp_class = TRAP_RISKY;
	      break;
	    default:
	      ;
	    }
	  insn_class = WORST_CLASS (insn_class, tmp_class);
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
    }
  else
    {
      code = GET_CODE (pat);
      switch (code)
	{
	case CLOBBER:
	  /* Test if it is a 'store'.  */
	  tmp_class = may_trap_exp (XEXP (pat, 0), 1);
	  break;
	case SET:
	  /* Test if it is a store.  */
	  tmp_class = may_trap_exp (SET_DEST (pat), 1);
	  if (tmp_class == TRAP_RISKY)
	    break;
	  /* Test if it is a load.  */
	  tmp_class =
	    WORST_CLASS (tmp_class,
			 may_trap_exp (SET_SRC (pat), 0));
	  break;
	case COND_EXEC:
	case TRAP_IF:
	  tmp_class = TRAP_RISKY;
	  break;
	default:;
	}
      insn_class = tmp_class;
    }

  return insn_class;
}
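
/* Example (editor's illustrative note): an insn whose pattern is
   (set (reg R) (mem/v (reg A))) classifies as IRISKY: the SET_DEST is
   trap-free as a store, but the volatile load on the SET_SRC side
   yields IRISKY, and WORST_CLASS keeps the worse of the two.  */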

/* Forward declarations.  */

HAIFA_INLINE static int insn_cost1 (rtx, enum reg_note, rtx, rtx);
static int priority (rtx);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx *, int);
static void queue_insn (rtx, int);
static int schedule_insn (rtx);
static int find_set_reg_weight (rtx);
static void find_insn_reg_weight (basic_block);
static void find_insn_reg_weight1 (rtx);
static void adjust_priority (rtx);
static void advance_one_cycle (void);

/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between three types of notes:

   (1) LINE_NUMBER notes, generated and used for debugging.  Here,
   before scheduling a region, a pointer to the LINE_NUMBER note is
   added to the insn following it (in save_line_notes()), and the note
   is removed (in rm_line_notes() and unlink_line_notes()).  After
   scheduling the region, this pointer is used for regeneration of
   the LINE_NUMBER note (in restore_line_notes()).

   (2) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (3) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */

static rtx unlink_other_notes (rtx, rtx);
static rtx unlink_line_notes (rtx, rtx);
static void reemit_notes (rtx);

static rtx *ready_lastpos (struct ready_list *);
static void ready_add (struct ready_list *, rtx, bool);
static void ready_sort (struct ready_list *);
static rtx ready_remove_first (struct ready_list *);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

static void debug_ready_list (struct ready_list *);

static void move_insn (rtx);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx ready_element (struct ready_list *, int);
static rtx ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx);
static int max_issue (struct ready_list *, int *, int);

static rtx choose_ready (struct ready_list *);

static void fix_inter_tick (rtx, rtx);
static int fix_tick_ready (rtx);
static void change_queue_index (rtx, int);
static void resolve_dep (rtx, rtx);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void extend_ready (int);
static void extend_global (rtx);
static void extend_all (rtx);
static void init_h_i_d (rtx);
static void generate_recovery_code (rtx);
static void process_insn_depend_be_in_spec (rtx, rtx, ds_t);
static void begin_speculative_block (rtx);
static void add_to_speculative_block (rtx);
static dw_t dep_weak (ds_t);
static edge find_fallthru_edge (basic_block);
static void init_before_recovery (void);
static basic_block create_recovery_block (void);
static void create_check_block_twin (rtx, bool);
static void fix_recovery_deps (basic_block);
static void associate_line_notes_with_blocks (basic_block);
static void change_pattern (rtx, rtx);
static int speculate_insn (rtx, ds_t, rtx *);
static void dump_new_block_header (int, basic_block, rtx, rtx);
static void restore_bb_notes (basic_block);
static void extend_bb (basic_block);
static void fix_jump_move (rtx);
static void move_block_after_check (rtx);
static void move_succs (VEC(edge,gc) **, basic_block);
static void init_glat (void);
static void init_glat1 (basic_block);
static void attach_life_info1 (basic_block);
static void free_glat (void);
static void sched_remove_insn (rtx);
static void clear_priorities (rtx);
static void add_jump_dependencies (rtx, rtx);
static void calc_priorities (rtx);
#ifdef ENABLE_CHECKING
static int has_edge_p (VEC(edge,gc) *, int);
static void check_cfg (rtx, rtx);
static void check_sched_flags (void);
#endif

#endif /* INSN_SCHEDULING */

/* Point to state used for the current scheduling pass.  */
struct sched_info *current_sched_info;

#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else

/* Working copy of frontend's sched_info variable.  */
static struct sched_info current_sched_info_var;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */

static rtx last_scheduled_insn;

/* Compute cost of executing INSN given the dependence LINK on the insn USED.
   This is the number of cycles between instruction issue and
   instruction results.  */

HAIFA_INLINE int
insn_cost (rtx insn, rtx link, rtx used)
{
  return insn_cost1 (insn, used ? REG_NOTE_KIND (link) : REG_NOTE_MAX,
		     link, used);
}

/* Compute cost of executing INSN given the dependence on the insn USED.
   If LINK is not NULL, then its REG_NOTE_KIND is used as a dependence type.
   Otherwise, dependence between INSN and USED is assumed to be of type
   DEP_TYPE.  This function was introduced as a workaround for the
   targetm.sched.adjust_cost hook.
   This is the number of cycles between instruction issue and
   instruction results.  */

HAIFA_INLINE static int
insn_cost1 (rtx insn, enum reg_note dep_type, rtx link, rtx used)
{
  int cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
	 understand.  We can't pass these directly to
	 result_ready_cost or insn_default_latency because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
	{
	  INSN_COST (insn) = 0;
	  return 0;
	}
      else
	{
	  cost = insn_default_latency (insn);
	  if (cost < 0)
	    cost = 0;

	  INSN_COST (insn) = cost;
	}
    }

  /* In this case estimate cost without caring how insn is used.  */
  if (used == 0)
    return cost;

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  */
  if (recog_memoized (used) < 0)
    cost = 0;
  else
    {
      gcc_assert (!link || dep_type == REG_NOTE_KIND (link));

      if (INSN_CODE (insn) >= 0)
	{
	  if (dep_type == REG_DEP_ANTI)
	    cost = 0;
	  else if (dep_type == REG_DEP_OUTPUT)
	    {
	      cost = (insn_default_latency (insn)
		      - insn_default_latency (used));
	      if (cost <= 0)
		cost = 1;
	    }
	  else if (bypass_p (insn))
	    cost = insn_latency (insn, used);
	}

      if (targetm.sched.adjust_cost_2)
	cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost);
      else
	{
	  gcc_assert (link);
	  if (targetm.sched.adjust_cost)
	    cost = targetm.sched.adjust_cost (used, link, insn, cost);
	}

      if (cost < 0)
	cost = 0;
    }

  return cost;
}
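
/* Example (editor's illustrative note): for a true data dependence
   where INSN has a default latency of 3, insn_cost1 () returns 3; for
   an anti dependence on the same pair it returns 0; and for an output
   dependence it returns the difference of the default latencies of
   INSN and USED, but never less than 1.  A target may still override
   these values through the adjust_cost hooks.  */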

/* Compute the priority number for INSN.  */

static int
priority (rtx insn)
{
  rtx link;

  if (! INSN_P (insn))
    return 0;

  if (! INSN_PRIORITY_KNOWN (insn))
    {
      int this_priority = 0;

      if (INSN_DEPEND (insn) == 0)
	this_priority = insn_cost (insn, 0, 0);
      else
	{
	  rtx prev_first, twin;
	  basic_block rec;

	  /* For recovery check instructions we calculate priority slightly
	     differently than for normal instructions.  Instead of walking
	     through INSN_DEPEND (check) list, we walk through INSN_DEPEND list
	     of each instruction in the corresponding recovery block.  */

	  rec = RECOVERY_BLOCK (insn);
	  if (!rec || rec == EXIT_BLOCK_PTR)
	    {
	      prev_first = PREV_INSN (insn);
	      twin = insn;
	    }
	  else
	    {
	      prev_first = NEXT_INSN (BB_HEAD (rec));
	      twin = PREV_INSN (BB_END (rec));
	    }

	  do
	    {
	      for (link = INSN_DEPEND (twin); link; link = XEXP (link, 1))
		{
		  rtx next;
		  int next_priority;

		  next = XEXP (link, 0);

		  if (BLOCK_FOR_INSN (next) != rec)
		    {
		      /* Critical path is meaningful in block boundaries
			 only.  */
		      if (! (*current_sched_info->contributes_to_priority)
			  (next, insn)
			  /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
			     then speculative instructions will be less
			     likely to be scheduled.  That is because the
			     priority of their producers will increase, and,
			     thus, the producers will be more likely to be
			     scheduled, thus resolving the dependence.  */
			  || ((current_sched_info->flags & DO_SPECULATION)
			      && (DEP_STATUS (link) & SPECULATIVE)
			      && !(spec_info->flags
				   & COUNT_SPEC_IN_CRITICAL_PATH)))
			continue;

		      next_priority = insn_cost1 (insn,
						  twin == insn ?
						  REG_NOTE_KIND (link) :
						  REG_DEP_ANTI,
						  twin == insn ? link : 0,
						  next) + priority (next);

		      if (next_priority > this_priority)
			this_priority = next_priority;
		    }
		}

	      twin = PREV_INSN (twin);
	    }
	  while (twin != prev_first);
	}
      INSN_PRIORITY (insn) = this_priority;
      INSN_PRIORITY_KNOWN (insn) = 1;
    }

  return INSN_PRIORITY (insn);
}
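
/* Worked example (editor's illustrative note): if insn A feeds insn B
   with latency 2, B feeds C with latency 1, and C has no dependents
   and a cost of 1, then priority (C) == 1, priority (B) == 1 + 1 == 2
   and priority (A) == 2 + 2 == 4: each insn's priority is the length
   of its longest latency-weighted path to the end of the region.  */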

/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

#define SCHED_SORT(READY, N_READY)                                   \
do { if ((N_READY) == 2)				             \
       swap_sort (READY, N_READY);			             \
     else if ((N_READY) > 2)                                         \
         qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }  \
while (0)

/* Returns a positive value if x is preferred; returns a negative value if
   y is preferred.  Should never return 0, since that will make the sort
   unstable.  */

static int
rank_for_schedule (const void *x, const void *y)
{
  rtx tmp = *(const rtx *) y;
  rtx tmp2 = *(const rtx *) x;
  rtx link;
  int tmp_class, tmp2_class, depend_count1, depend_count2;
  int val, priority_val, weight_val, info_val;

  /* The insn in a schedule group should be issued first.  */
  if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
    return SCHED_GROUP_P (tmp2) ? 1 : -1;

  /* Prefer insn with higher priority.  */
  priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);

  if (priority_val)
    return priority_val;

  /* Prefer a speculative insn with greater dependency weakness.  */
  if (spec_info)
    {
      ds_t ds1, ds2;
      dw_t dw1, dw2;
      int dw;

      ds1 = TODO_SPEC (tmp) & SPECULATIVE;
      if (ds1)
	dw1 = dep_weak (ds1);
      else
	dw1 = NO_DEP_WEAK;

      ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
      if (ds2)
	dw2 = dep_weak (ds2);
      else
	dw2 = NO_DEP_WEAK;

      dw = dw2 - dw1;
      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
	return dw;
    }

  /* Prefer an insn with smaller contribution to register pressure.  */
  if (!reload_completed &&
      (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
    return weight_val;

  info_val = (*current_sched_info->rank) (tmp, tmp2);
  if (info_val)
    return info_val;

  /* Compare insns based on their relation to the last scheduled insn.  */
  if (INSN_P (last_scheduled_insn))
    {
      /* Classify the instructions into three classes:
         1) Data dependent on last scheduled insn.
         2) Anti/Output dependent on last scheduled insn.
         3) Independent of last scheduled insn, or has latency of one.
         Choose the insn from the highest numbered class if different.  */
      link = find_insn_list (tmp, INSN_DEPEND (last_scheduled_insn));
      if (link == 0 || insn_cost (last_scheduled_insn, link, tmp) == 1)
	tmp_class = 3;
      else if (REG_NOTE_KIND (link) == 0)	/* Data dependence.  */
	tmp_class = 1;
      else
	tmp_class = 2;

      link = find_insn_list (tmp2, INSN_DEPEND (last_scheduled_insn));
      if (link == 0 || insn_cost (last_scheduled_insn, link, tmp2) == 1)
	tmp2_class = 3;
      else if (REG_NOTE_KIND (link) == 0)	/* Data dependence.  */
	tmp2_class = 1;
      else
	tmp2_class = 2;

      if ((val = tmp2_class - tmp_class))
	return val;
    }

  /* Prefer the insn which has more later insns that depend on it.
     This gives the scheduler more freedom when scheduling later
     instructions at the expense of added register pressure.  */
  depend_count1 = 0;
  for (link = INSN_DEPEND (tmp); link; link = XEXP (link, 1))
    depend_count1++;

  depend_count2 = 0;
  for (link = INSN_DEPEND (tmp2); link; link = XEXP (link, 1))
    depend_count2++;

  val = depend_count2 - depend_count1;
  if (val)
    return val;

  /* If insns are equally good, sort by INSN_LUID (original insn order),
     so that we make the sort stable.  This minimizes instruction movement,
     thus minimizing sched's effect on debugging and cross-jumping.  */
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
}
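
/* Usage sketch (editor's illustrative note): rank_for_schedule () is
   a qsort () comparator, so an ascending sort, as in

     qsort (ready_lastpos (ready), ready->n_ready, sizeof (rtx),
	    rank_for_schedule);

   leaves the most desirable insn at the highest index, which is
   exactly where ready_remove_first () looks for it.  */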

/* Resort the array A in which only the element at index N may be out of
   order.  */

HAIFA_INLINE static void
swap_sort (rtx *a, int n)
{
  rtx insn = a[n - 1];
  int i = n - 2;

  while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
    {
      a[i + 1] = a[i];
      i -= 1;
    }
  a[i + 1] = insn;
}

/* Add INSN to the insn queue so that it can be executed at least
   N_CYCLES after the currently executing insn.  Preserve insns
   chain for debugging purposes.  */

HAIFA_INLINE static void
queue_insn (rtx insn, int n_cycles)
{
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
  rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);

  gcc_assert (n_cycles <= max_insn_queue_index);

  insn_queue[next_q] = link;
  q_size += 1;

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
	       (*current_sched_info->print_insn) (insn, 0));

      fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
    }

  QUEUE_INDEX (insn) = next_q;
}

/* Remove INSN from queue.  */
static void
queue_remove (rtx insn)
{
  gcc_assert (QUEUE_INDEX (insn) >= 0);
  remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
  q_size--;
  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
}

/* Return a pointer to the bottom of the ready list, i.e. the insn
   with the lowest priority.  */

HAIFA_INLINE static rtx *
ready_lastpos (struct ready_list *ready)
{
  gcc_assert (ready->n_ready >= 1);
  return ready->vec + ready->first - ready->n_ready + 1;
}

/* Add an element INSN to the ready list so that it ends up with the
   lowest/highest priority depending on FIRST_P.  */

HAIFA_INLINE static void
ready_add (struct ready_list *ready, rtx insn, bool first_p)
{
  if (!first_p)
    {
      if (ready->first == ready->n_ready)
	{
	  memmove (ready->vec + ready->veclen - ready->n_ready,
		   ready_lastpos (ready),
		   ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 1;
	}
      ready->vec[ready->first - ready->n_ready] = insn;
    }
  else
    {
      if (ready->first == ready->veclen - 1)
	{
	  if (ready->n_ready)
	    /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
	    memmove (ready->vec + ready->veclen - ready->n_ready - 1,
		     ready_lastpos (ready),
		     ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 2;
	}
      ready->vec[++(ready->first)] = insn;
    }

  ready->n_ready++;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
  QUEUE_INDEX (insn) = QUEUE_READY;
}

/* Remove the element with the highest priority from the ready list and
   return it.  */

HAIFA_INLINE static rtx
ready_remove_first (struct ready_list *ready)
{
  rtx t;

  gcc_assert (ready->n_ready);
  t = ready->vec[ready->first--];
  ready->n_ready--;
  /* If the queue becomes empty, reset it.  */
  if (ready->n_ready == 0)
    ready->first = ready->veclen - 1;

  gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
  QUEUE_INDEX (t) = QUEUE_NOWHERE;

  return t;
}

/* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we try to choose the ready insn that
   permits starting the maximum number of insns on the same cycle.  */

/* Return a pointer to the element INDEX from the ready list.  INDEX for
   insn with the highest priority is 0, and the lowest priority has
   N_READY - 1.  */

HAIFA_INLINE static rtx
ready_element (struct ready_list *ready, int index)
{
  gcc_assert (ready->n_ready && index < ready->n_ready);

  return ready->vec[ready->first - index];
}

/* Remove the element INDEX from the ready list and return it.  INDEX
   for insn with the highest priority is 0, and the lowest priority
   has N_READY - 1.  */

HAIFA_INLINE static rtx
ready_remove (struct ready_list *ready, int index)
{
  rtx t;
  int i;

  if (index == 0)
    return ready_remove_first (ready);
  gcc_assert (ready->n_ready && index < ready->n_ready);
  t = ready->vec[ready->first - index];
  ready->n_ready--;
  for (i = index; i < ready->n_ready; i++)
    ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
  QUEUE_INDEX (t) = QUEUE_NOWHERE;
  return t;
}

/* Remove INSN from the ready list.  */
static void
ready_remove_insn (rtx insn)
{
  int i;

  for (i = 0; i < readyp->n_ready; i++)
    if (ready_element (readyp, i) == insn)
      {
        ready_remove (readyp, i);
        return;
      }
  gcc_unreachable ();
}

/* Sort the ready list READY by ascending priority, using the SCHED_SORT
   macro.  */

HAIFA_INLINE static void
ready_sort (struct ready_list *ready)
{
  rtx *first = ready_lastpos (ready);
  SCHED_SORT (first, ready->n_ready);
}

/* PREV is an insn that is ready to execute.  Adjust its priority if that
   will help shorten or lengthen register lifetimes as appropriate.  Also
   provide a hook for the target to tweak itself.  */

HAIFA_INLINE static void
adjust_priority (rtx prev)
{
  /* ??? There used to be code here to try and estimate how an insn
     affected register lifetimes, but it did it by looking at REG_DEAD
     notes, which we removed in schedule_region.  Nor did it try to
     take into account register pressure or anything useful like that.

     Revisit when we have a machine model to work with and not before.  */

  if (targetm.sched.adjust_priority)
    INSN_PRIORITY (prev) =
      targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
}

/* Advance time by one cycle.  */
HAIFA_INLINE static void
advance_one_cycle (void)
{
  if (targetm.sched.dfa_pre_cycle_insn)
    state_transition (curr_state,
		      targetm.sched.dfa_pre_cycle_insn ());

  state_transition (curr_state, NULL);

  if (targetm.sched.dfa_post_cycle_insn)
    state_transition (curr_state,
		      targetm.sched.dfa_post_cycle_insn ());
}
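
/* Note (editor's illustrative sketch): passing NULL to
   state_transition () is the automaton interface's way of advancing
   the DFA by one cycle; the optional dfa_pre_cycle_insn and
   dfa_post_cycle_insn hooks let a target feed dummy insns around that
   boundary, e.g. to model issue slots reserved at the start or end of
   every cycle.  */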

/* Clock at which the previous instruction was issued.  */
static int last_clock_var;

/* INSN is the "currently executing insn".  Launch each insn which was
   waiting on INSN.  The function returns the necessary cycle advance
   after issuing the insn (it is not zero for insns in a schedule
   group).  */

static int
schedule_insn (rtx insn)
{
  rtx link;
  int advance = 0;

  if (sched_verbose >= 1)
    {
      char buf[2048];

      print_insn (buf, insn, 0);
      buf[40] = 0;
      fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);

      if (recog_memoized (insn) < 0)
	fprintf (sched_dump, "nothing");
      else
	print_reservation (sched_dump, insn);
      fputc ('\n', sched_dump);
    }

  /* An instruction being scheduled should have all its dependencies
     resolved and should have been removed from the ready list.  */
  gcc_assert (INSN_DEP_COUNT (insn) == 0);
  gcc_assert (!LOG_LINKS (insn));
  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);

  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  /* Now we can free the RESOLVED_DEPS list.  */
  if (current_sched_info->flags & USE_DEPS_LIST)
    free_DEPS_LIST_list (&RESOLVED_DEPS (insn));
  else
    free_INSN_LIST_list (&RESOLVED_DEPS (insn));

  gcc_assert (INSN_TICK (insn) >= MIN_TICK);
  if (INSN_TICK (insn) > clock_var)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if the following flag is set.  */
    gcc_assert (flag_sched_stalled_insns);

  /* ??? Probably, if INSN is scheduled prematurely, we should leave
     INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
  INSN_TICK (insn) = clock_var;

  /* Update dependent instructions.  */
  for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
    {
      rtx next = XEXP (link, 0);

      resolve_dep (next, insn);

      if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
	{
	  int effective_cost;

	  effective_cost = try_ready (next);

	  if (effective_cost >= 0
	      && SCHED_GROUP_P (next)
	      && advance < effective_cost)
	    advance = effective_cost;
	}
      else
	/* A check always has only one forward dependence (to the first insn
	   in the recovery block), therefore, this will be executed only
	   once.  */
	{
	  gcc_assert (XEXP (link, 1) == 0);
	  fix_recovery_deps (RECOVERY_BLOCK (insn));
	}
    }

  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (issue_rate > 1
      && GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER)
    {
      if (reload_completed)
	PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
      last_clock_var = clock_var;
    }

  return advance;
}

/* Functions for handling of notes.  */

/* Delete notes beginning with INSN and put them in the chain
   of notes ended by NOTE_LIST.
   Returns the insn following the notes.  */

static rtx
unlink_other_notes (rtx insn, rtx tail)
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && NOTE_NOT_BB_P (insn))
    {
      rtx next = NEXT_INSN (insn);
      basic_block bb = BLOCK_FOR_INSN (insn);

      /* Delete the note from its current position.  */
      if (prev)
	NEXT_INSN (prev) = next;
      if (next)
	PREV_INSN (next) = prev;

      if (bb)
        {
          /* Basic block can begin with either LABEL or
             NOTE_INSN_BASIC_BLOCK.  */
          gcc_assert (BB_HEAD (bb) != insn);

          /* Check if we are removing last insn in the BB.  */
          if (BB_END (bb) == insn)
            BB_END (bb) = prev;
        }

      /* See sched_analyze to see how these are handled.  */
      if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
	  && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
	{
	  /* Insert the note at the end of the notes list.  */
	  PREV_INSN (insn) = note_list;
	  if (note_list)
	    NEXT_INSN (note_list) = insn;
	  note_list = insn;
	}

      insn = next;
    }
  return insn;
}

/* Delete line notes beginning with INSN.  Record line-number notes so
   they can be reused.  Returns the insn following the notes.  */

static rtx
unlink_line_notes (rtx insn, rtx tail)
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && NOTE_NOT_BB_P (insn))
    {
      rtx next = NEXT_INSN (insn);

      if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
	{
          basic_block bb = BLOCK_FOR_INSN (insn);

	  /* Delete the note from its current position.  */
	  if (prev)
	    NEXT_INSN (prev) = next;
	  if (next)
	    PREV_INSN (next) = prev;

          if (bb)
            {
              /* Basic block can begin with either LABEL or
                 NOTE_INSN_BASIC_BLOCK.  */
              gcc_assert (BB_HEAD (bb) != insn);

              /* Check if we are removing last insn in the BB.  */
              if (BB_END (bb) == insn)
                BB_END (bb) = prev;
            }

	  /* Record line-number notes so they can be reused.  */
	  LINE_NOTE (insn) = insn;
	}
      else
	prev = insn;

      insn = next;
    }
  return insn;
}

/* Return the head and tail pointers of the ebb starting at BEG and
   ending at END.  */

void
get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
{
  rtx beg_head = BB_HEAD (beg);
  rtx beg_tail = BB_END (beg);
  rtx end_head = BB_HEAD (end);
  rtx end_tail = BB_END (end);

  /* Don't include any notes or labels at the beginning of the BEG
     basic block, or notes at the end of the END basic block.  */

  if (LABEL_P (beg_head))
    beg_head = NEXT_INSN (beg_head);

  while (beg_head != beg_tail)
    if (NOTE_P (beg_head))
      beg_head = NEXT_INSN (beg_head);
    else
      break;

  *headp = beg_head;

  if (beg == end)
    end_head = beg_head;
  else if (LABEL_P (end_head))
    end_head = NEXT_INSN (end_head);

  while (end_head != end_tail)
    if (NOTE_P (end_tail))
      end_tail = PREV_INSN (end_tail);
    else
      break;

  *tailp = end_tail;
}

/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */

int
no_real_insns_p (rtx head, rtx tail)
{
  while (head != NEXT_INSN (tail))
    {
      if (!NOTE_P (head) && !LABEL_P (head))
	return 0;
      head = NEXT_INSN (head);
    }
  return 1;
}

/* Delete line notes from one block.  Save them so they can be later
   restored (in restore_line_notes).  HEAD and TAIL are the boundaries
   of the block in which notes should be processed.  */

void
rm_line_notes (rtx head, rtx tail)
{
  rtx next_tail;
  rtx insn;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
         This is needed to keep the debugger from
         getting completely deranged.  */
      if (NOTE_NOT_BB_P (insn))
	{
	  prev = insn;
	  insn = unlink_line_notes (insn, next_tail);

	  gcc_assert (prev != tail && prev != head && insn != next_tail);
	}
    }
}

/* Save line number notes for each insn in block B.  HEAD and TAIL are
   the boundaries of the block in which notes should be processed.  */

void
save_line_notes (int b, rtx head, rtx tail)
{
  rtx next_tail;

  /* We must use the true line number for the first insn in the block
     that was computed and saved at the start of this pass.  We can't
     use the current line number, because scheduling of the previous
     block may have changed the current line number.  */

  rtx line = line_note_head[b];
  rtx insn;

  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      line = insn;
    else
      LINE_NOTE (insn) = line;
}

/* After a block was scheduled, insert line notes into the insns list.
   HEAD and TAIL are the boundaries of the block in which notes should
   be processed.  */

void
restore_line_notes (rtx head, rtx tail)
{
  rtx line, note, prev, new;
  int added_notes = 0;
  rtx next_tail, insn;

  head = head;
  next_tail = NEXT_INSN (tail);

  /* Determine the current line-number.  We want to know the current
     line number of the first insn of the block here, in case it is
     different from the true line number that was saved earlier.  If
     different, then we need a line number note before the first insn
     of this block.  If it happens to be the same, then we don't want to
     emit another line number note here.  */
  for (line = head; line; line = PREV_INSN (line))
    if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
      break;

  /* Walk the insns keeping track of the current line-number and inserting
     the line-number notes as needed.  */
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      line = insn;
  /* This used to emit line number notes before every non-deleted note.
     However, this confuses a debugger, because line notes not separated
     by real instructions all end up at the same address.  I can find no
     use for line number notes before other notes, so none are emitted.  */
    else if (!NOTE_P (insn)
	     && INSN_UID (insn) < old_max_uid
	     && (note = LINE_NOTE (insn)) != 0
	     && note != line
	     && (line == 0
#ifdef USE_MAPPED_LOCATION
		 || NOTE_SOURCE_LOCATION (note) != NOTE_SOURCE_LOCATION (line)
#else
		 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
		 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)
#endif
		 ))
      {
	line = note;
	prev = PREV_INSN (insn);
	if (LINE_NOTE (note))
	  {
	    /* Re-use the original line-number note.  */
	    LINE_NOTE (note) = 0;
	    PREV_INSN (note) = prev;
	    NEXT_INSN (prev) = note;
	    PREV_INSN (insn) = note;
	    NEXT_INSN (note) = insn;
	    set_block_for_insn (note, BLOCK_FOR_INSN (insn));
	  }
	else
	  {
	    added_notes++;
	    new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
#ifndef USE_MAPPED_LOCATION
	    NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
#endif
	  }
      }
  if (sched_verbose && added_notes)
    fprintf (sched_dump, ";; added %d line-number notes\n", added_notes);
}

/* After scheduling the function, delete redundant line notes from the
   insns list.  */

void
rm_redundant_line_notes (void)
{
  rtx line = 0;
  rtx insn = get_insns ();
  int active_insn = 0;
  int notes = 0;

  /* Walk the insns deleting redundant line-number notes.  Many of these
     are already present.  The remainder tend to occur at basic
     block boundaries.  */
  for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      {
	/* If there are no active insns following, INSN is redundant.  */
	if (active_insn == 0)
	  {
	    notes++;
	    SET_INSN_DELETED (insn);
	  }
	/* If the line number is unchanged, LINE is redundant.  */
	else if (line
#ifdef USE_MAPPED_LOCATION
		 && NOTE_SOURCE_LOCATION (line) == NOTE_SOURCE_LOCATION (insn)
#else
		 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
		 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn)
#endif
)
	  {
	    notes++;
	    SET_INSN_DELETED (line);
	    line = insn;
	  }
	else
	  line = insn;
	active_insn = 0;
      }
    else if (!((NOTE_P (insn)
		&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
	       || (NONJUMP_INSN_P (insn)
		   && (GET_CODE (PATTERN (insn)) == USE
		       || GET_CODE (PATTERN (insn)) == CLOBBER))))
      active_insn++;

  if (sched_verbose && notes)
    fprintf (sched_dump, ";; deleted %d line-number notes\n", notes);
}

/* Delete notes between HEAD and TAIL and put them in the chain
   of notes ended by NOTE_LIST.  */

void
rm_other_notes (rtx head, rtx tail)
{
  rtx next_tail;
  rtx insn;

  note_list = 0;
  if (head == tail && (! INSN_P (head)))
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
         This is needed to keep the debugger from
         getting completely deranged.  */
      if (NOTE_NOT_BB_P (insn))
	{
	  prev = insn;

	  insn = unlink_other_notes (insn, next_tail);

	  gcc_assert (prev != tail && prev != head && insn != next_tail);
	}
    }
}
1583 
1584 /* Functions for computation of registers live/usage info.  */
1585 
1586 /* This function looks for a new register being defined.
1587    If the destination register is already used by the source,
1588    a new register is not needed.  */
1589 
1590 static int
1591 find_set_reg_weight (rtx x)
1592 {
1593   if (GET_CODE (x) == CLOBBER
1594       && register_operand (SET_DEST (x), VOIDmode))
1595     return 1;
1596   if (GET_CODE (x) == SET
1597       && register_operand (SET_DEST (x), VOIDmode))
1598     {
1599       if (REG_P (SET_DEST (x)))
1600 	{
1601 	  if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
1602 	    return 1;
1603 	  else
1604 	    return 0;
1605 	}
1606       return 1;
1607     }
1608   return 0;
1609 }
1610 
1611 /* Calculate INSN_REG_WEIGHT for all insns of a block.  */
1612 
1613 static void
1614 find_insn_reg_weight (basic_block bb)
1615 {
1616   rtx insn, next_tail, head, tail;
1617 
1618   get_ebb_head_tail (bb, bb, &head, &tail);
1619   next_tail = NEXT_INSN (tail);
1620 
1621   for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1622     find_insn_reg_weight1 (insn);
1623 }
1624 
1625 /* Calculate INSN_REG_WEIGHT for a single instruction.
1626    Separated from find_insn_reg_weight because of the need
1627    to initialize a new instruction in generate_recovery_code.  */
1628 static void
1629 find_insn_reg_weight1 (rtx insn)
1630 {
1631   int reg_weight = 0;
1632   rtx x;
1633 
1634   /* Handle register life information.  */
1635   if (! INSN_P (insn))
1636     return;
1637 
1638   /* Increment weight for each register born here.  */
1639   x = PATTERN (insn);
1640   reg_weight += find_set_reg_weight (x);
1641   if (GET_CODE (x) == PARALLEL)
1642     {
1643       int j;
1644       for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
1645 	{
1646 	  x = XVECEXP (PATTERN (insn), 0, j);
1647 	  reg_weight += find_set_reg_weight (x);
1648 	}
1649     }
1650   /* Decrement weight for each register that dies here.  */
1651   for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
1652     {
1653       if (REG_NOTE_KIND (x) == REG_DEAD
1654 	  || REG_NOTE_KIND (x) == REG_UNUSED)
1655 	reg_weight--;
1656     }
1657 
1658   INSN_REG_WEIGHT (insn) = reg_weight;
1659 }
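
/* A standalone sketch (not part of the pass) of how the two routines
   above combine: an insn's weight is +1 for each register born and -1
   for each register that dies.  The names below are hypothetical.  */
#if 0
static int
toy_insn_reg_weight (int n_regs_born, int n_regs_dying)
{
  /* E.g. "r3 = r1 + r2" with REG_DEAD notes for r1 and r2:
     one register born, two dying, net weight -1 -- the insn
     reduces register pressure.  */
  return n_regs_born - n_regs_dying;
}
#endif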
1660 
1661 /* Move insns that became ready to fire from the queue to the ready list.  */
1662 
1663 static void
1664 queue_to_ready (struct ready_list *ready)
1665 {
1666   rtx insn;
1667   rtx link;
1668 
1669   q_ptr = NEXT_Q (q_ptr);
1670 
1671   /* Add all pending insns that can be scheduled without stalls to the
1672      ready list.  */
1673   for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
1674     {
1675       insn = XEXP (link, 0);
1676       q_size -= 1;
1677 
1678       if (sched_verbose >= 2)
1679 	fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
1680 		 (*current_sched_info->print_insn) (insn, 0));
1681 
1682       /* If the ready list is full, delay the insn for 1 cycle.
1683 	 See the comment in schedule_block for the rationale.  */
1684       if (!reload_completed
1685 	  && ready->n_ready > MAX_SCHED_READY_INSNS
1686 	  && !SCHED_GROUP_P (insn))
1687 	{
1688 	  if (sched_verbose >= 2)
1689 	    fprintf (sched_dump, "requeued because ready full\n");
1690 	  queue_insn (insn, 1);
1691 	}
1692       else
1693 	{
1694 	  ready_add (ready, insn, false);
1695 	  if (sched_verbose >= 2)
1696 	    fprintf (sched_dump, "moving to ready without stalls\n");
1697         }
1698     }
1699   free_INSN_LIST_list (&insn_queue[q_ptr]);
1700 
1701   /* If there are no ready insns, stall until one is ready and add all
1702      of the pending insns at that point to the ready list.  */
1703   if (ready->n_ready == 0)
1704     {
1705       int stalls;
1706 
1707       for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
1708 	{
1709 	  if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
1710 	    {
1711 	      for (; link; link = XEXP (link, 1))
1712 		{
1713 		  insn = XEXP (link, 0);
1714 		  q_size -= 1;
1715 
1716 		  if (sched_verbose >= 2)
1717 		    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
1718 			     (*current_sched_info->print_insn) (insn, 0));
1719 
1720 		  ready_add (ready, insn, false);
1721 		  if (sched_verbose >= 2)
1722 		    fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
1723 		}
1724 	      free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
1725 
1726 	      advance_one_cycle ();
1727 
1728 	      break;
1729 	    }
1730 
1731 	  advance_one_cycle ();
1732 	}
1733 
1734       q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
1735       clock_var += stalls;
1736     }
1737 }
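
/* A standalone sketch (not part of the pass): INSN_QUEUE behaves as a
   circular array of per-cycle buckets, which is what the NEXT_Q and
   NEXT_Q_AFTER index arithmetic above expresses.  All names below are
   hypothetical, and each bucket holds a single uid instead of a list.  */
#if 0
#define TOY_QUEUE_LEN 8		/* must exceed the longest possible stall */
static int toy_queue[TOY_QUEUE_LEN];
static int toy_q_ptr;

/* Queue INSN_UID to become ready N_CYCLES from now.  */
static void
toy_queue_insn (int insn_uid, int n_cycles)
{
  toy_queue[(toy_q_ptr + n_cycles) % TOY_QUEUE_LEN] = insn_uid;
}

/* Advance one cycle and return the bucket that just became ready.  */
static int
toy_advance_cycle (void)
{
  toy_q_ptr = (toy_q_ptr + 1) % TOY_QUEUE_LEN;
  return toy_queue[toy_q_ptr];
}
#endif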
1738 
1739 /* Used by early_queue_to_ready.  Determines whether it is "ok" to
1740    prematurely move INSN from the queue to the ready list.  Currently,
1741    if a target defines the hook 'is_costly_dependence', this function
1742    uses the hook to check whether there exist any dependences which are
1743    considered costly by the target, between INSN and other insns that
1744    have already been scheduled.  Dependences are checked up to Y cycles
1745    back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
1746    controlling this value.
1747    (Other considerations could be taken into account instead, or in
1748    addition, depending on user flags and target hooks.)  */
1749 
1750 static bool
1751 ok_for_early_queue_removal (rtx insn)
1752 {
1753   int n_cycles;
1754   rtx prev_insn = last_scheduled_insn;
1755 
1756   if (targetm.sched.is_costly_dependence)
1757     {
1758       for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
1759 	{
1760 	  for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
1761 	    {
1762 	      rtx dep_link = 0;
1763 	      int dep_cost;
1764 
1765 	      if (!NOTE_P (prev_insn))
1766 		{
1767 		  dep_link = find_insn_list (insn, INSN_DEPEND (prev_insn));
1768 		  if (dep_link)
1769 		    {
1770 		      dep_cost = insn_cost (prev_insn, dep_link, insn) ;
1771 		      if (targetm.sched.is_costly_dependence (prev_insn, insn,
1772 				dep_link, dep_cost,
1773 				flag_sched_stalled_insns_dep - n_cycles))
1774 			return false;
1775 		    }
1776 		}
1777 
1778 	      if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
1779 		break;
1780 	    }
1781 
1782 	  if (!prev_insn)
1783 	    break;
1784 	  prev_insn = PREV_INSN (prev_insn);
1785 	}
1786     }
1787 
1788   return true;
1789 }
1790 
1791 
1792 /* Remove insns from the queue, before they become "ready" with respect
1793    to FU latency considerations.  */
1794 
1795 static int
1796 early_queue_to_ready (state_t state, struct ready_list *ready)
1797 {
1798   rtx insn;
1799   rtx link;
1800   rtx next_link;
1801   rtx prev_link;
1802   bool move_to_ready;
1803   int cost;
1804   state_t temp_state = alloca (dfa_state_size);
1805   int stalls;
1806   int insns_removed = 0;
1807 
1808   /*
1809      Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
1810      function:
1811 
1812      X == 0: There is no limit on how many queued insns can be removed
1813              prematurely.  (flag_sched_stalled_insns = -1).
1814 
1815      X >= 1: Only X queued insns can be removed prematurely in each
1816 	     invocation.  (flag_sched_stalled_insns = X).
1817 
1818      Otherwise: Early queue removal is disabled.
1819          (flag_sched_stalled_insns = 0)
1820   */
1821 
1822   if (! flag_sched_stalled_insns)
1823     return 0;
1824 
1825   for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
1826     {
1827       if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
1828 	{
1829 	  if (sched_verbose > 6)
1830 	    fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
1831 
1832 	  prev_link = 0;
1833 	  while (link)
1834 	    {
1835 	      next_link = XEXP (link, 1);
1836 	      insn = XEXP (link, 0);
1837 	      if (insn && sched_verbose > 6)
1838 		print_rtl_single (sched_dump, insn);
1839 
1840 	      memcpy (temp_state, state, dfa_state_size);
1841 	      if (recog_memoized (insn) < 0)
1842 		/* Use a non-negative cost to mark it as not ready,
1843 		   to avoid infinite Q->R->Q->R cycling.  */
1844 		cost = 0;
1845 	      else
1846 		cost = state_transition (temp_state, insn);
1847 
1848 	      if (sched_verbose >= 6)
1849 		fprintf (sched_dump, "transition cost = %d\n", cost);
1850 
1851 	      move_to_ready = false;
1852 	      if (cost < 0)
1853 		{
1854 		  move_to_ready = ok_for_early_queue_removal (insn);
1855 		  if (move_to_ready == true)
1856 		    {
1857 		      /* move from Q to R */
1858 		      q_size -= 1;
1859 		      ready_add (ready, insn, false);
1860 
1861 		      if (prev_link)
1862 			XEXP (prev_link, 1) = next_link;
1863 		      else
1864 			insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
1865 
1866 		      free_INSN_LIST_node (link);
1867 
1868 		      if (sched_verbose >= 2)
1869 			fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
1870 				 (*current_sched_info->print_insn) (insn, 0));
1871 
1872 		      insns_removed++;
1873 		      if (insns_removed == flag_sched_stalled_insns)
1874 			/* Remove no more than flag_sched_stalled_insns insns
1875 			   from Q at a time.  */
1876 			return insns_removed;
1877 		    }
1878 		}
1879 
1880 	      if (move_to_ready == false)
1881 		prev_link = link;
1882 
1883 	      link = next_link;
1884 	    } /* while link */
1885 	} /* if link */
1886 
1887     } /* for stalls.. */
1888 
1889   return insns_removed;
1890 }
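
/* A standalone sketch (not part of the pass) of the three regimes of
   -fsched-stalled-insns documented above, as a predicate deciding
   whether one more premature removal is allowed.  Hypothetical names.  */
#if 0
static int
toy_may_remove_more (int flag_value, int removed_so_far)
{
  if (flag_value == 0)
    return 0;				/* early removal disabled */
  if (flag_value < 0)
    return 1;				/* -1: no limit */
  return removed_so_far < flag_value;	/* X >= 1: at most X per call */
}
#endif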
1891 
1892 
1893 /* Print the ready list for debugging purposes.  Callable from debugger.  */
1894 
1895 static void
1896 debug_ready_list (struct ready_list *ready)
1897 {
1898   rtx *p;
1899   int i;
1900 
1901   if (ready->n_ready == 0)
1902     {
1903       fprintf (sched_dump, "\n");
1904       return;
1905     }
1906 
1907   p = ready_lastpos (ready);
1908   for (i = 0; i < ready->n_ready; i++)
1909     fprintf (sched_dump, "  %s", (*current_sched_info->print_insn) (p[i], 0));
1910   fprintf (sched_dump, "\n");
1911 }
1912 
1913 /* Search INSN for REG_SAVE_NOTE note pairs for
1914    NOTE_INSN_EH_REGION_{BEG,END} and convert them back into
1915    NOTEs.  The REG_SAVE_NOTE note following the first one contains the
1916    saved value for NOTE_BLOCK_NUMBER which is useful for
1917    NOTE_INSN_EH_REGION_{BEG,END} NOTEs.  */
1918 
1919 static void
1920 reemit_notes (rtx insn)
1921 {
1922   rtx note, last = insn;
1923 
1924   for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
1925     {
1926       if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
1927 	{
1928 	  enum insn_note note_type = INTVAL (XEXP (note, 0));
1929 
1930 	  last = emit_note_before (note_type, last);
1931 	  remove_note (insn, note);
1932 	}
1933     }
1934 }
1935 
1936 /* Move INSN.  Reemit notes if needed.  Update CFG, if needed.  */
1937 static void
1938 move_insn (rtx insn)
1939 {
1940   rtx last = last_scheduled_insn;
1941 
1942   if (PREV_INSN (insn) != last)
1943     {
1944       basic_block bb;
1945       rtx note;
1946       int jump_p = 0;
1947 
1948       bb = BLOCK_FOR_INSN (insn);
1949 
1950       /* BB_HEAD is either LABEL or NOTE.  */
1951       gcc_assert (BB_HEAD (bb) != insn);
1952 
1953       if (BB_END (bb) == insn)
1954 	/* If this is last instruction in BB, move end marker one
1955 	   instruction up.  */
1956 	{
1957 	  /* Jumps are always placed at the end of basic block.  */
1958 	  jump_p = control_flow_insn_p (insn);
1959 
1960 	  gcc_assert (!jump_p
1961 		      || ((current_sched_info->flags & SCHED_RGN)
1962 			  && IS_SPECULATION_BRANCHY_CHECK_P (insn))
1963 		      || (current_sched_info->flags & SCHED_EBB));
1964 
1965 	  gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
1966 
1967 	  BB_END (bb) = PREV_INSN (insn);
1968 	}
1969 
1970       gcc_assert (BB_END (bb) != last);
1971 
1972       if (jump_p)
1973 	/* We move the block note along with jump.  */
1974 	{
1975 	  /* NT is needed for assertion below.  */
1976 	  rtx nt = current_sched_info->next_tail;
1977 
1978 	  note = NEXT_INSN (insn);
1979 	  while (NOTE_NOT_BB_P (note) && note != nt)
1980 	    note = NEXT_INSN (note);
1981 
1982 	  if (note != nt
1983 	      && (LABEL_P (note)
1984 		  || BARRIER_P (note)))
1985 	    note = NEXT_INSN (note);
1986 
1987 	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
1988 	}
1989       else
1990 	note = insn;
1991 
1992       NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
1993       PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
1994 
1995       NEXT_INSN (note) = NEXT_INSN (last);
1996       PREV_INSN (NEXT_INSN (last)) = note;
1997 
1998       NEXT_INSN (last) = insn;
1999       PREV_INSN (insn) = last;
2000 
2001       bb = BLOCK_FOR_INSN (last);
2002 
2003       if (jump_p)
2004 	{
2005 	  fix_jump_move (insn);
2006 
2007 	  if (BLOCK_FOR_INSN (insn) != bb)
2008 	    move_block_after_check (insn);
2009 
2010 	  gcc_assert (BB_END (bb) == last);
2011 	}
2012 
2013       set_block_for_insn (insn, bb);
2014 
2015       /* Update BB_END, if needed.  */
2016       if (BB_END (bb) == last)
2017 	BB_END (bb) = insn;
2018     }
2019 
2020   reemit_notes (insn);
2021 
2022   SCHED_GROUP_P (insn) = 0;
2023 }
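
/* A standalone sketch (not part of the pass): the pointer surgery in
   move_insn is the usual unlink-and-relink on a doubly linked list,
   shown here on a minimal hypothetical node type.  */
#if 0
struct toy_node { struct toy_node *prev, *next; };

/* Unlink N from its current position and reinsert it after AFTER.  */
static void
toy_move_after (struct toy_node *n, struct toy_node *after)
{
  n->prev->next = n->next;	/* unlink N */
  n->next->prev = n->prev;

  n->next = after->next;	/* splice N back in after AFTER */
  n->prev = after;
  after->next->prev = n;
  after->next = n;
}
#endif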
2024 
2025 /* The following structure describes an entry in the stack of choices.  */
2026 struct choice_entry
2027 {
2028   /* Ordinal number of the issued insn in the ready queue.  */
2029   int index;
2030   /* The number of remaining insns whose issue we should still try.  */
2031   int rest;
2032   /* The number of issued essential insns.  */
2033   int n;
2034   /* State after issuing the insn.  */
2035   state_t state;
2036 };
2037 
2038 /* The following array is used to implement a stack of choices used in
2039    function max_issue.  */
2040 static struct choice_entry *choice_stack;
2041 
2042 /* The following variable value is the number of essential insns issued on
2043    the current cycle.  An insn is an essential one if it changes the
2044    processor's state.  */
2045 static int cycle_issued_insns;
2046 
2047 /* The following variable value is the maximal number of tries of issuing
2048    insns for the first cycle multipass insn scheduling.  We define
2049    this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
2050    need this constraint if all real insns (with non-negative codes)
2051    had reservations because in this case the algorithm complexity is
2052    O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
2053    might be incomplete and such insns might occur.  For such
2054    descriptions, the complexity of the algorithm (without the constraint)
2055    could reach DFA_LOOKAHEAD ** N, where N is the queue length.  */
2056 static int max_lookahead_tries;
2057 
2058 /* The following value is the value of the hook
2059    `first_cycle_multipass_dfa_lookahead' at the last call of
2060    `max_issue'.  */
2061 static int cached_first_cycle_multipass_dfa_lookahead = 0;
2062 
2063 /* The following value is the value of `issue_rate' at the last call of
2064    `sched_init'.  */
2065 static int cached_issue_rate = 0;
2066 
2067 /* The following function returns the maximal (or close to maximal)
2068    number of insns which can be issued on the same cycle, one of which
2069    is the insn with the best rank (the first insn in READY).  To do
2070    this, the function tries different samples of ready insns.  READY
2071    is the current queue `ready'.  The global array READY_TRY reflects
2072    which insns are already issued in this try.  MAX_POINTS is the sum
2073    of points of all instructions in READY.  The function stops
2074    immediately if it reaches a solution in which all insns can be issued.
2075    INDEX will contain the index of the best insn in READY.  This
2076    function is used only for first cycle multipass scheduling.  */
2077 static int
2078 max_issue (struct ready_list *ready, int *index, int max_points)
2079 {
2080   int n, i, all, n_ready, best, delay, tries_num, points = -1;
2081   struct choice_entry *top;
2082   rtx insn;
2083 
2084   best = 0;
2085   memcpy (choice_stack->state, curr_state, dfa_state_size);
2086   top = choice_stack;
2087   top->rest = cached_first_cycle_multipass_dfa_lookahead;
2088   top->n = 0;
2089   n_ready = ready->n_ready;
2090   for (all = i = 0; i < n_ready; i++)
2091     if (!ready_try [i])
2092       all++;
2093   i = 0;
2094   tries_num = 0;
2095   for (;;)
2096     {
2097       if (top->rest == 0 || i >= n_ready)
2098 	{
2099 	  if (top == choice_stack)
2100 	    break;
2101 	  if (best < top - choice_stack && ready_try [0])
2102 	    {
2103 	      best = top - choice_stack;
2104 	      *index = choice_stack [1].index;
2105 	      points = top->n;
2106 	      if (top->n == max_points || best == all)
2107 		break;
2108 	    }
2109 	  i = top->index;
2110 	  ready_try [i] = 0;
2111 	  top--;
2112 	  memcpy (curr_state, top->state, dfa_state_size);
2113 	}
2114       else if (!ready_try [i])
2115 	{
2116 	  tries_num++;
2117 	  if (tries_num > max_lookahead_tries)
2118 	    break;
2119 	  insn = ready_element (ready, i);
2120 	  delay = state_transition (curr_state, insn);
2121 	  if (delay < 0)
2122 	    {
2123 	      if (state_dead_lock_p (curr_state))
2124 		top->rest = 0;
2125 	      else
2126 		top->rest--;
2127 	      n = top->n;
2128 	      if (memcmp (top->state, curr_state, dfa_state_size) != 0)
2129 		n += ISSUE_POINTS (insn);
2130 	      top++;
2131 	      top->rest = cached_first_cycle_multipass_dfa_lookahead;
2132 	      top->index = i;
2133 	      top->n = n;
2134 	      memcpy (top->state, curr_state, dfa_state_size);
2135 	      ready_try [i] = 1;
2136 	      i = -1;
2137 	    }
2138 	}
2139       i++;
2140     }
2141   while (top != choice_stack)
2142     {
2143       ready_try [top->index] = 0;
2144       top--;
2145     }
2146   memcpy (curr_state, choice_stack->state, dfa_state_size);
2147 
2148   if (sched_verbose >= 4)
2149     fprintf (sched_dump, ";;\t\tChoosed insn : %s; points: %d/%d\n",
2150 	     (*current_sched_info->print_insn) (ready_element (ready, *index),
2151 						0),
2152 	     points, max_points);
2153 
2154   return best;
2155 }
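
/* A standalone sketch (not part of the pass): max_issue is a depth-first
   search with backtracking over subsets of the ready list, keeping the
   deepest accepted prefix.  Here the DFA state is reduced to a single
   capacity counter and the marking array plays the role of READY_TRY;
   the real function additionally bounds the search by
   max_lookahead_tries.  All names are hypothetical.  */
#if 0
/* Return the largest number of candidates (widths WIDTH[0..N-1]) that
   fit together into a cycle of capacity CAP.  Call with DEPTH == 0.  */
static int
toy_max_issue (int *width, int n, int cap, int depth)
{
  int i, best = depth;

  for (i = 0; i < n; i++)
    if (width[i] > 0 && width[i] <= cap)
      {
	int w = width[i], sub;

	width[i] = -w;		/* mark as issued in this try */
	sub = toy_max_issue (width, n, cap - w, depth + 1);
	width[i] = w;		/* unmark on backtrack */

	if (sub > best)
	  best = sub;
      }
  return best;
}
#endif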
2156 
2157 /* The following function chooses an insn from READY and modifies
2158    *N_READY and READY.  It is used only for first
2159    cycle multipass scheduling.  */
2160 
2161 static rtx
2162 choose_ready (struct ready_list *ready)
2163 {
2164   int lookahead = 0;
2165 
2166   if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2167     lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2168   if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
2169     return ready_remove_first (ready);
2170   else
2171     {
2172       /* Try to choose the better insn.  */
2173       int index = 0, i, n;
2174       rtx insn;
2175       int more_issue, max_points, try_data = 1, try_control = 1;
2176 
2177       if (cached_first_cycle_multipass_dfa_lookahead != lookahead)
2178 	{
2179 	  cached_first_cycle_multipass_dfa_lookahead = lookahead;
2180 	  max_lookahead_tries = 100;
2181 	  for (i = 0; i < issue_rate; i++)
2182 	    max_lookahead_tries *= lookahead;
2183 	}
2184       insn = ready_element (ready, 0);
2185       if (INSN_CODE (insn) < 0)
2186 	return ready_remove_first (ready);
2187 
2188       if (spec_info
2189 	  && spec_info->flags & (PREFER_NON_DATA_SPEC
2190 				 | PREFER_NON_CONTROL_SPEC))
2191 	{
2192 	  for (i = 0, n = ready->n_ready; i < n; i++)
2193 	    {
2194 	      rtx x;
2195 	      ds_t s;
2196 
2197 	      x = ready_element (ready, i);
2198 	      s = TODO_SPEC (x);
2199 
2200 	      if (spec_info->flags & PREFER_NON_DATA_SPEC
2201 		  && !(s & DATA_SPEC))
2202 		{
2203 		  try_data = 0;
2204 		  if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2205 		      || !try_control)
2206 		    break;
2207 		}
2208 
2209 	      if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2210 		  && !(s & CONTROL_SPEC))
2211 		{
2212 		  try_control = 0;
2213 		  if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2214 		    break;
2215 		}
2216 	    }
2217 	}
2218 
2219       if ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2220 	  || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
2221 	  || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2222 	      && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2223 	      (insn)))
2224 	/* Discard speculative instruction that stands first in the ready
2225 	   list.  */
2226 	{
2227 	  change_queue_index (insn, 1);
2228 	  return 0;
2229 	}
2230 
2231       max_points = ISSUE_POINTS (insn);
2232       more_issue = issue_rate - cycle_issued_insns - 1;
2233 
2234       for (i = 1; i < ready->n_ready; i++)
2235 	{
2236 	  insn = ready_element (ready, i);
2237 	  ready_try [i]
2238 	    = (INSN_CODE (insn) < 0
2239                || (!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2240                || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
2241 	       || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2242 		   && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2243 		   (insn)));
2244 
2245 	  if (!ready_try [i] && more_issue-- > 0)
2246 	    max_points += ISSUE_POINTS (insn);
2247 	}
2248 
2249       if (max_issue (ready, &index, max_points) == 0)
2250 	return ready_remove_first (ready);
2251       else
2252 	return ready_remove (ready, index);
2253     }
2254 }
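
/* A standalone sketch (not part of the pass) of the search budget set
   above: max_lookahead_tries is 100 * lookahead ** issue_rate, computed
   by repeated multiplication.  Hypothetical name.  */
#if 0
static int
toy_lookahead_tries (int lookahead, int issue_rate)
{
  int i, tries = 100;

  for (i = 0; i < issue_rate; i++)
    tries *= lookahead;
  return tries;
}
#endif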
2255 
2256 /* Use forward list scheduling to rearrange insns of block pointed to by
2257    TARGET_BB, possibly bringing insns from subsequent blocks in the same
2258    region.  */
2259 
2260 void
2261 schedule_block (basic_block *target_bb, int rgn_n_insns1)
2262 {
2263   struct ready_list ready;
2264   int i, first_cycle_insn_p;
2265   int can_issue_more;
2266   state_t temp_state = NULL;  /* It is used for multipass scheduling.  */
2267   int sort_p, advance, start_clock_var;
2268 
2269   /* Head/tail info for this block.  */
2270   rtx prev_head = current_sched_info->prev_head;
2271   rtx next_tail = current_sched_info->next_tail;
2272   rtx head = NEXT_INSN (prev_head);
2273   rtx tail = PREV_INSN (next_tail);
2274 
2275   /* We used to have code to avoid getting parameters moved from hard
2276      argument registers into pseudos.
2277 
2278      However, it was removed when it proved to be of marginal benefit
2279      and caused problems because schedule_block and compute_forward_dependences
2280      had different notions of what the "head" insn was.  */
2281 
2282   gcc_assert (head != tail || INSN_P (head));
2283 
2284   added_recovery_block_p = false;
2285 
2286   /* Debug info.  */
2287   if (sched_verbose)
2288     dump_new_block_header (0, *target_bb, head, tail);
2289 
2290   state_reset (curr_state);
2291 
2292   /* Allocate the ready list.  */
2293   readyp = &ready;
2294   ready.vec = NULL;
2295   ready_try = NULL;
2296   choice_stack = NULL;
2297 
2298   rgn_n_insns = -1;
2299   extend_ready (rgn_n_insns1 + 1);
2300 
2301   ready.first = ready.veclen - 1;
2302   ready.n_ready = 0;
2303 
2304   /* It is used for first cycle multipass scheduling.  */
2305   temp_state = alloca (dfa_state_size);
2306 
2307   if (targetm.sched.md_init)
2308     targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
2309 
2310   /* We start inserting insns after PREV_HEAD.  */
2311   last_scheduled_insn = prev_head;
2312 
2313   gcc_assert (NOTE_P (last_scheduled_insn)
2314 	      && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2315 
2316   /* Initialize INSN_QUEUE.  Q_SIZE is the total number of insns in the
2317      queue.  */
2318   q_ptr = 0;
2319   q_size = 0;
2320 
2321   insn_queue = alloca ((max_insn_queue_index + 1) * sizeof (rtx));
2322   memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
2323 
2324   /* Start just before the beginning of time.  */
2325   clock_var = -1;
2326 
2327   /* We need the queue and ready lists and clock_var to be initialized
2328      in try_ready () (which is called through init_ready_list ()).  */
2329   (*current_sched_info->init_ready_list) ();
2330 
2331   /* The algorithm is O(n^2) in the number of ready insns at any given
2332      time in the worst case.  Before reload we are more likely to have
2333      big lists so truncate them to a reasonable size.  */
2334   if (!reload_completed && ready.n_ready > MAX_SCHED_READY_INSNS)
2335     {
2336       ready_sort (&ready);
2337 
2338       /* Find first free-standing insn past MAX_SCHED_READY_INSNS.  */
2339       for (i = MAX_SCHED_READY_INSNS; i < ready.n_ready; i++)
2340 	if (!SCHED_GROUP_P (ready_element (&ready, i)))
2341 	  break;
2342 
2343       if (sched_verbose >= 2)
2344 	{
2345 	  fprintf (sched_dump,
2346 		   ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2347 	  fprintf (sched_dump,
2348 		   ";;\t\t before reload => truncated to %d insns\n", i);
2349 	}
2350 
2351       /* Delay all insns past it for 1 cycle.  */
2352       while (i < ready.n_ready)
2353 	queue_insn (ready_remove (&ready, i), 1);
2354     }
2355 
2356   /* Now we can restore basic block notes and maintain precise cfg.  */
2357   restore_bb_notes (*target_bb);
2358 
2359   last_clock_var = -1;
2360 
2361   advance = 0;
2362 
2363   sort_p = TRUE;
2364   /* Loop until all the insns in BB are scheduled.  */
2365   while ((*current_sched_info->schedule_more_p) ())
2366     {
2367       do
2368 	{
2369 	  start_clock_var = clock_var;
2370 
2371 	  clock_var++;
2372 
2373 	  advance_one_cycle ();
2374 
2375 	  /* Add to the ready list all pending insns that can be issued now.
2376 	     If there are no ready insns, increment clock until one
2377 	     is ready and add all pending insns at that point to the ready
2378 	     list.  */
2379 	  queue_to_ready (&ready);
2380 
2381 	  gcc_assert (ready.n_ready);
2382 
2383 	  if (sched_verbose >= 2)
2384 	    {
2385 	      fprintf (sched_dump, ";;\t\tReady list after queue_to_ready:  ");
2386 	      debug_ready_list (&ready);
2387 	    }
2388 	  advance -= clock_var - start_clock_var;
2389 	}
2390       while (advance > 0);
2391 
2392       if (sort_p)
2393 	{
2394 	  /* Sort the ready list based on priority.  */
2395 	  ready_sort (&ready);
2396 
2397 	  if (sched_verbose >= 2)
2398 	    {
2399 	      fprintf (sched_dump, ";;\t\tReady list after ready_sort:  ");
2400 	      debug_ready_list (&ready);
2401 	    }
2402 	}
2403 
2404       /* Allow the target to reorder the list, typically for
2405 	 better instruction bundling.  */
2406       if (sort_p && targetm.sched.reorder
2407 	  && (ready.n_ready == 0
2408 	      || !SCHED_GROUP_P (ready_element (&ready, 0))))
2409 	can_issue_more =
2410 	  targetm.sched.reorder (sched_dump, sched_verbose,
2411 				 ready_lastpos (&ready),
2412 				 &ready.n_ready, clock_var);
2413       else
2414 	can_issue_more = issue_rate;
2415 
2416       first_cycle_insn_p = 1;
2417       cycle_issued_insns = 0;
2418       for (;;)
2419 	{
2420 	  rtx insn;
2421 	  int cost;
2422 	  bool asm_p = false;
2423 
2424 	  if (sched_verbose >= 2)
2425 	    {
2426 	      fprintf (sched_dump, ";;\tReady list (t = %3d):  ",
2427 		       clock_var);
2428 	      debug_ready_list (&ready);
2429 	    }
2430 
2431 	  if (ready.n_ready == 0
2432 	      && can_issue_more
2433 	      && reload_completed)
2434 	    {
2435 	      /* Allow scheduling insns directly from the queue in case
2436 		 there's nothing better to do (ready list is empty) but
2437 		 there are still vacant dispatch slots in the current cycle.  */
2438 	      if (sched_verbose >= 6)
2439 		fprintf (sched_dump, ";;\t\tSecond chance\n");
2440 	      memcpy (temp_state, curr_state, dfa_state_size);
2441 	      if (early_queue_to_ready (temp_state, &ready))
2442 		ready_sort (&ready);
2443 	    }
2444 
2445 	  if (ready.n_ready == 0 || !can_issue_more
2446 	      || state_dead_lock_p (curr_state)
2447 	      || !(*current_sched_info->schedule_more_p) ())
2448 	    break;
2449 
2450 	  /* Select and remove the insn from the ready list.  */
2451 	  if (sort_p)
2452 	    {
2453 	      insn = choose_ready (&ready);
2454 	      if (!insn)
2455 		continue;
2456 	    }
2457 	  else
2458 	    insn = ready_remove_first (&ready);
2459 
2460 	  if (targetm.sched.dfa_new_cycle
2461 	      && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
2462 					      insn, last_clock_var,
2463 					      clock_var, &sort_p))
2464 	    /* SORT_P is used by the target to override sorting
2465 	       of the ready list.  This is needed when the target
2466 	       has modified its internal structures expecting that
2467 	       the insn will be issued next.  As we need the insn
2468 	       to have the highest priority (so it will be returned by
2469 	       the ready_remove_first call above), we invoke
2470 	       ready_add (&ready, insn, true).
2471 	       But, still, there is one issue: INSN can be later
2472 	       discarded by scheduler's front end through
2473 	       current_sched_info->can_schedule_ready_p, hence, won't
2474 	       be issued next.  */
2475 	    {
2476 	      ready_add (&ready, insn, true);
2477               break;
2478 	    }
2479 
2480 	  sort_p = TRUE;
2481 	  memcpy (temp_state, curr_state, dfa_state_size);
2482 	  if (recog_memoized (insn) < 0)
2483 	    {
2484 	      asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
2485 		       || asm_noperands (PATTERN (insn)) >= 0);
2486 	      if (!first_cycle_insn_p && asm_p)
2487 		/* This is an asm insn that we tried to issue on a cycle
2488 		   other than the first.  Issue it on the next cycle.  */
2489 		cost = 1;
2490 	      else
2491 		/* A USE insn, or something else we don't need to
2492 		   understand.  We can't pass these directly to
2493 		   state_transition because it will trigger a
2494 		   fatal error for unrecognizable insns.  */
2495 		cost = 0;
2496 	    }
2497 	  else
2498 	    {
2499 	      cost = state_transition (temp_state, insn);
2500 	      if (cost < 0)
2501 		cost = 0;
2502 	      else if (cost == 0)
2503 		cost = 1;
2504 	    }
2505 
2506 	  if (cost >= 1)
2507 	    {
2508 	      queue_insn (insn, cost);
2509  	      if (SCHED_GROUP_P (insn))
2510  		{
2511  		  advance = cost;
2512  		  break;
2513  		}
2514 
2515 	      continue;
2516 	    }
2517 
2518 	  if (current_sched_info->can_schedule_ready_p
2519 	      && ! (*current_sched_info->can_schedule_ready_p) (insn))
2520 	    /* We normally get here only if we don't want to move
2521 	       insn from the split block.  */
2522 	    {
2523 	      TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
2524 	      continue;
2525 	    }
2526 
2527 	  /* DECISION is made.  */
2528 
2529           if (TODO_SPEC (insn) & SPECULATIVE)
2530             generate_recovery_code (insn);
2531 
2532 	  if (control_flow_insn_p (last_scheduled_insn)
2533 	      /* This is used to switch basic blocks by request
2534 		 from the scheduler front-end (actually, sched-ebb.c only).
2535 		 It handles blocks with a single fallthru
2536 		 edge.  If the succeeding block has a jump, that jump will
2537 		 try to move to the end of the current bb, thus corrupting the CFG.  */
2538 	      || current_sched_info->advance_target_bb (*target_bb, insn))
2539 	    {
2540 	      *target_bb = current_sched_info->advance_target_bb
2541 		(*target_bb, 0);
2542 
2543 	      if (sched_verbose)
2544 		{
2545 		  rtx x;
2546 
2547 		  x = next_real_insn (last_scheduled_insn);
2548 		  gcc_assert (x);
2549 		  dump_new_block_header (1, *target_bb, x, tail);
2550 		}
2551 
2552 	      last_scheduled_insn = bb_note (*target_bb);
2553 	    }
2554 
2555 	  /* Update counters, etc in the scheduler's front end.  */
2556 	  (*current_sched_info->begin_schedule_ready) (insn,
2557 						       last_scheduled_insn);
2558 
2559 	  move_insn (insn);
2560 	  last_scheduled_insn = insn;
2561 
2562 	  if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
2563             {
2564               cycle_issued_insns++;
2565               memcpy (curr_state, temp_state, dfa_state_size);
2566             }
2567 
2568 	  if (targetm.sched.variable_issue)
2569 	    can_issue_more =
2570 	      targetm.sched.variable_issue (sched_dump, sched_verbose,
2571 					       insn, can_issue_more);
2572 	  /* A naked CLOBBER or USE generates no instruction, so do
2573 	     not count them against the issue rate.  */
2574 	  else if (GET_CODE (PATTERN (insn)) != USE
2575 		   && GET_CODE (PATTERN (insn)) != CLOBBER)
2576 	    can_issue_more--;
2577 
2578 	  advance = schedule_insn (insn);
2579 
2580 	  /* After issuing an asm insn we should start a new cycle.  */
2581 	  if (advance == 0 && asm_p)
2582 	    advance = 1;
2583 	  if (advance != 0)
2584 	    break;
2585 
2586 	  first_cycle_insn_p = 0;
2587 
2588 	  /* Sort the ready list based on priority.  This must be
2589 	     redone here, as schedule_insn may have readied additional
2590 	     insns that will not be sorted correctly.  */
2591 	  if (ready.n_ready > 0)
2592 	    ready_sort (&ready);
2593 
2594 	  if (targetm.sched.reorder2
2595 	      && (ready.n_ready == 0
2596 		  || !SCHED_GROUP_P (ready_element (&ready, 0))))
2597 	    {
2598 	      can_issue_more =
2599 		targetm.sched.reorder2 (sched_dump, sched_verbose,
2600 					ready.n_ready
2601 					? ready_lastpos (&ready) : NULL,
2602 					&ready.n_ready, clock_var);
2603 	    }
2604 	}
2605     }
2606 
2607   /* Debug info.  */
2608   if (sched_verbose)
2609     {
2610       fprintf (sched_dump, ";;\tReady list (final):  ");
2611       debug_ready_list (&ready);
2612     }
2613 
2614   if (current_sched_info->queue_must_finish_empty)
2615     /* Sanity check -- queue must be empty now.  Meaningless if region has
2616        multiple bbs.  */
2617     gcc_assert (!q_size && !ready.n_ready);
2618   else
2619     {
2620       /* We must maintain QUEUE_INDEX between blocks in region.  */
2621       for (i = ready.n_ready - 1; i >= 0; i--)
2622 	{
2623 	  rtx x;
2624 
2625 	  x = ready_element (&ready, i);
2626 	  QUEUE_INDEX (x) = QUEUE_NOWHERE;
2627 	  TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2628 	}
2629 
2630       if (q_size)
2631 	for (i = 0; i <= max_insn_queue_index; i++)
2632 	  {
2633 	    rtx link;
2634 	    for (link = insn_queue[i]; link; link = XEXP (link, 1))
2635 	      {
2636 		rtx x;
2637 
2638 		x = XEXP (link, 0);
2639 		QUEUE_INDEX (x) = QUEUE_NOWHERE;
2640 		TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2641 	      }
2642 	    free_INSN_LIST_list (&insn_queue[i]);
2643 	  }
2644     }
2645 
2646   if (!current_sched_info->queue_must_finish_empty
2647       || added_recovery_block_p)
2648     {
2649       /* INSN_TICK (minimum clock tick at which the insn becomes
2650          ready) may not be correct for the insns in the subsequent
2651          blocks of the region.  We should use a correct value of
2652          `clock_var' or modify INSN_TICK.  It is better to keep
2653          clock_var value equal to 0 at the start of a basic block.
2654          Therefore we modify INSN_TICK here.  */
2655       fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
2656     }
2657 
2658   if (targetm.sched.md_finish)
2659     targetm.sched.md_finish (sched_dump, sched_verbose);
2660 
2661   /* Update head/tail boundaries.  */
2662   head = NEXT_INSN (prev_head);
2663   tail = last_scheduled_insn;
2664 
2665   /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
2666      previously found among the insns.  Insert them at the beginning
2667      of the insns.  */
2668   if (note_list != 0)
2669     {
2670       basic_block head_bb = BLOCK_FOR_INSN (head);
2671       rtx note_head = note_list;
2672 
2673       while (PREV_INSN (note_head))
2674 	{
2675 	  set_block_for_insn (note_head, head_bb);
2676 	  note_head = PREV_INSN (note_head);
2677 	}
2678       /* In the above loop we've missed this note:  */
2679       set_block_for_insn (note_head, head_bb);
2680 
2681       PREV_INSN (note_head) = PREV_INSN (head);
2682       NEXT_INSN (PREV_INSN (head)) = note_head;
2683       PREV_INSN (head) = note_list;
2684       NEXT_INSN (note_list) = head;
2685       head = note_head;
2686     }
2687 
2688   /* Debugging.  */
2689   if (sched_verbose)
2690     {
2691       fprintf (sched_dump, ";;   total time = %d\n;;   new head = %d\n",
2692 	       clock_var, INSN_UID (head));
2693       fprintf (sched_dump, ";;   new tail = %d\n\n",
2694 	       INSN_UID (tail));
2695     }
2696 
2697   current_sched_info->head = head;
2698   current_sched_info->tail = tail;
2699 
2700   free (ready.vec);
2701 
2702   free (ready_try);
2703   for (i = 0; i <= rgn_n_insns; i++)
2704     free (choice_stack [i].state);
2705   free (choice_stack);
2706 }
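
/* A standalone sketch (not part of the pass): the loop above in
   miniature.  A toy list scheduler over a four-insn dependence DAG with
   unlimited issue width -- insns wait until all predecessors are
   scheduled and their latency-adjusted tick has arrived, mirroring the
   queue/ready/clock interplay of schedule_block.  Everything here is
   hypothetical.  */
#if 0
#include <stdio.h>

#define N 4

/* dep[i][j] != 0 means insn J depends on insn I with that latency.  */
static const int dep[N][N] = {
  { 0, 2, 1, 0 },	/* 0 feeds 1 (latency 2) and 2 (latency 1) */
  { 0, 0, 0, 1 },	/* 1 feeds 3 */
  { 0, 0, 0, 1 },	/* 2 feeds 3 */
  { 0, 0, 0, 0 }
};

int
main (void)
{
  int n_preds[N] = { 0 }, tick[N] = { 0 };
  int done = 0, clock = 0, i, j;

  for (i = 0; i < N; i++)
    for (j = 0; j < N; j++)
      if (dep[i][j])
	n_preds[j]++;

  while (done < N)
    {
      for (i = 0; i < N; i++)
	if (n_preds[i] == 0 && tick[i] <= clock)
	  {
	    printf ("cycle %d: issue insn %d\n", clock, i);
	    n_preds[i] = -1;	/* mark as scheduled */
	    done++;
	    for (j = 0; j < N; j++)
	      if (dep[i][j])
		{
		  n_preds[j]--;	/* cf. resolve_dep */
		  if (tick[j] < clock + dep[i][j])
		    tick[j] = clock + dep[i][j];	/* cf. fix_tick_ready */
		}
	  }
      clock++;			/* cf. advance_one_cycle */
    }
  return 0;
}
#endif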
2707 
2708 /* Set_priorities: compute the priority of each insn in the block.  */
2709 
2710 int
2711 set_priorities (rtx head, rtx tail)
2712 {
2713   rtx insn;
2714   int n_insn;
2715   int sched_max_insns_priority =
2716 	current_sched_info->sched_max_insns_priority;
2717   rtx prev_head;
2718 
2719   if (head == tail && (! INSN_P (head)))
2720     return 0;
2721 
2722   n_insn = 0;
2723 
2724   prev_head = PREV_INSN (head);
2725   for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
2726     {
2727       if (!INSN_P (insn))
2728 	continue;
2729 
2730       n_insn++;
2731       (void) priority (insn);
2732 
2733       if (INSN_PRIORITY_KNOWN (insn))
2734 	sched_max_insns_priority =
2735 	  MAX (sched_max_insns_priority, INSN_PRIORITY (insn));
2736     }
2737 
2738   current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
2739 
2740   return n_insn;
2741 }
2742 
2743 /* Next LUID to assign to an instruction.  */
2744 static int luid;
2745 
2746 /* Initialize some global state for the scheduler.  */
2747 
2748 void
2749 sched_init (void)
2750 {
2751   basic_block b;
2752   rtx insn;
2753   int i;
2754 
2755   /* Switch to working copy of sched_info.  */
2756   memcpy (&current_sched_info_var, current_sched_info,
2757 	  sizeof (current_sched_info_var));
2758   current_sched_info = &current_sched_info_var;
2759 
2760   /* Disable speculative loads if cc0 is defined; they are unsafe in its presence.  */
2761 #ifdef HAVE_cc0
2762   flag_schedule_speculative_load = 0;
2763 #endif
2764 
2765   /* Set dump and sched_verbose for the desired debugging output.  If no
2766      dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
2767      For -fsched-verbose=N, N>=10, print everything to stderr.  */
2768   sched_verbose = sched_verbose_param;
2769   if (sched_verbose_param == 0 && dump_file)
2770     sched_verbose = 1;
2771   sched_dump = ((sched_verbose_param >= 10 || !dump_file)
2772 		? stderr : dump_file);
2773 
2774   /* Initialize SPEC_INFO.  */
2775   if (targetm.sched.set_sched_flags)
2776     {
2777       spec_info = &spec_info_var;
2778       targetm.sched.set_sched_flags (spec_info);
2779       if (current_sched_info->flags & DO_SPECULATION)
2780 	spec_info->weakness_cutoff =
2781 	  (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
2782       else
2783 	/* So we won't read anything accidentally.  */
2784 	spec_info = 0;
2785 #ifdef ENABLE_CHECKING
2786       check_sched_flags ();
2787 #endif
2788     }
2789   else
2790     /* So we won't read anything accidentally.  */
2791     spec_info = 0;
2792 
2793   /* Initialize issue_rate.  */
2794   if (targetm.sched.issue_rate)
2795     issue_rate = targetm.sched.issue_rate ();
2796   else
2797     issue_rate = 1;
2798 
2799   if (cached_issue_rate != issue_rate)
2800     {
2801       cached_issue_rate = issue_rate;
2802       /* To invalidate max_lookahead_tries:  */
2803       cached_first_cycle_multipass_dfa_lookahead = 0;
2804     }
2805 
2806   old_max_uid = 0;
2807   h_i_d = 0;
2808   extend_h_i_d ();
2809 
2810   for (i = 0; i < old_max_uid; i++)
2811     {
2812       h_i_d[i].cost = -1;
2813       h_i_d[i].todo_spec = HARD_DEP;
2814       h_i_d[i].queue_index = QUEUE_NOWHERE;
2815       h_i_d[i].tick = INVALID_TICK;
2816       h_i_d[i].inter_tick = INVALID_TICK;
2817     }
2818 
2819   if (targetm.sched.init_dfa_pre_cycle_insn)
2820     targetm.sched.init_dfa_pre_cycle_insn ();
2821 
2822   if (targetm.sched.init_dfa_post_cycle_insn)
2823     targetm.sched.init_dfa_post_cycle_insn ();
2824 
2825   dfa_start ();
2826   dfa_state_size = state_size ();
2827   curr_state = xmalloc (dfa_state_size);
2828 
2829   h_i_d[0].luid = 0;
2830   luid = 1;
2831   FOR_EACH_BB (b)
2832     for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
2833       {
2834 	INSN_LUID (insn) = luid;
2835 
2836 	/* Increment the next luid, unless this is a note.  We don't
2837 	   really need separate IDs for notes and we don't want to
2838 	   schedule differently depending on whether or not there are
2839 	   line-number notes, i.e., depending on whether or not we're
2840 	   generating debugging information.  */
2841 	if (!NOTE_P (insn))
2842 	  ++luid;
2843 
2844 	if (insn == BB_END (b))
2845 	  break;
2846       }
2847 
2848   init_dependency_caches (luid);
2849 
2850   init_alias_analysis ();
2851 
2852   line_note_head = 0;
2853   old_last_basic_block = 0;
2854   glat_start = 0;
2855   glat_end = 0;
2856   extend_bb (0);
2857 
2858   if (current_sched_info->flags & USE_GLAT)
2859     init_glat ();
2860 
2861   /* Compute INSN_REG_WEIGHT for all blocks.  We must do this before
2862      removing death notes.  */
2863   FOR_EACH_BB_REVERSE (b)
2864     find_insn_reg_weight (b);
2865 
2866   if (targetm.sched.md_init_global)
2867       targetm.sched.md_init_global (sched_dump, sched_verbose, old_max_uid);
2868 
2869   nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
2870   before_recovery = 0;
2871 
2872 #ifdef ENABLE_CHECKING
2873   /* This is useful mainly for finding bugs in check_cfg () itself.  */
2874   check_cfg (0, 0);
2875 #endif
2876 }
2877 
2878 /* Free global data used during insn scheduling.  */
2879 
2880 void
2881 sched_finish (void)
2882 {
2883   free (h_i_d);
2884   free (curr_state);
2885   dfa_finish ();
2886   free_dependency_caches ();
2887   end_alias_analysis ();
2888   free (line_note_head);
2889   free_glat ();
2890 
2891   if (targetm.sched.md_finish_global)
2892     targetm.sched.md_finish_global (sched_dump, sched_verbose);
2893 
2894   if (spec_info && spec_info->dump)
2895     {
2896       char c = reload_completed ? 'a' : 'b';
2897 
2898       fprintf (spec_info->dump,
2899 	       ";; %s:\n", current_function_name ());
2900 
2901       fprintf (spec_info->dump,
2902                ";; Procedure %cr-begin-data-spec motions == %d\n",
2903                c, nr_begin_data);
2904       fprintf (spec_info->dump,
2905                ";; Procedure %cr-be-in-data-spec motions == %d\n",
2906                c, nr_be_in_data);
2907       fprintf (spec_info->dump,
2908                ";; Procedure %cr-begin-control-spec motions == %d\n",
2909                c, nr_begin_control);
2910       fprintf (spec_info->dump,
2911                ";; Procedure %cr-be-in-control-spec motions == %d\n",
2912                c, nr_be_in_control);
2913     }
2914 
2915 #ifdef ENABLE_CHECKING
2916   /* After reload the ia64 backend clobbers the CFG, so we can't check anything.  */
2917   if (!reload_completed)
2918     check_cfg (0, 0);
2919 #endif
2920 
2921   current_sched_info = NULL;
2922 }
2923 
2924 /* Fix INSN_TICKs of the instructions in the current block as well as
2925    INSN_TICKs of their dependents.
2926    HEAD and TAIL are the beginning and the end of the current scheduled block.  */
2927 static void
2928 fix_inter_tick (rtx head, rtx tail)
2929 {
2930   /* Set of instructions with corrected INSN_TICK.  */
2931   bitmap_head processed;
2932   int next_clock = clock_var + 1;
2933 
2934   bitmap_initialize (&processed, 0);
2935 
2936   /* Iterate over the scheduled instructions and fix their INSN_TICKs and
2937      the INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
2938      across different blocks.  */
2939   for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
2940     {
2941       if (INSN_P (head))
2942 	{
2943 	  int tick;
2944 	  rtx link;
2945 
2946 	  tick = INSN_TICK (head);
2947 	  gcc_assert (tick >= MIN_TICK);
2948 
2949 	  /* Fix INSN_TICK of instruction from just scheduled block.  */
2950 	  if (!bitmap_bit_p (&processed, INSN_LUID (head)))
2951 	    {
2952 	      bitmap_set_bit (&processed, INSN_LUID (head));
2953 	      tick -= next_clock;
2954 
2955 	      if (tick < MIN_TICK)
2956 		tick = MIN_TICK;
2957 
2958 	      INSN_TICK (head) = tick;
2959 	    }
2960 
2961 	  for (link = INSN_DEPEND (head); link; link = XEXP (link, 1))
2962 	    {
2963 	      rtx next;
2964 
2965 	      next = XEXP (link, 0);
2966 	      tick = INSN_TICK (next);
2967 
2968 	      if (tick != INVALID_TICK
2969 		  /* If NEXT has its INSN_TICK calculated, fix it.
2970 		     If not - it will be properly calculated from
2971 		     scratch later in fix_tick_ready.  */
2972 		  && !bitmap_bit_p (&processed, INSN_LUID (next)))
2973 		{
2974 		  bitmap_set_bit (&processed, INSN_LUID (next));
2975 		  tick -= next_clock;
2976 
2977 		  if (tick < MIN_TICK)
2978 		    tick = MIN_TICK;
2979 
2980 		  if (tick > INTER_TICK (next))
2981 		    INTER_TICK (next) = tick;
2982 		  else
2983 		    tick = INTER_TICK (next);
2984 
2985 		  INSN_TICK (next) = tick;
2986 		}
2987 	    }
2988 	}
2989     }
2990   bitmap_clear (&processed);
2991 }
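
/* A standalone sketch (not part of the pass) of the renormalization
   above: rebase a tick so the next block starts at clock 0, clamping
   from below at MIN_TICK.  Hypothetical name.  */
#if 0
static int
toy_rebase_tick (int tick, int next_clock, int min_tick)
{
  tick -= next_clock;
  return tick < min_tick ? min_tick : tick;
}
#endif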
2992 
2993 /* Check if NEXT is ready to be added to the ready or queue list.
2994    If "yes", add it to the proper list.
2995    Returns:
2996       -1 - is not ready yet,
2997        0 - added to the ready list,
2998    0 < N - queued for N cycles.  */
2999 int
3000 try_ready (rtx next)
3001 {
3002   ds_t old_ts, *ts;
3003   rtx link;
3004 
3005   ts = &TODO_SPEC (next);
3006   old_ts = *ts;
3007 
3008   gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
3009 	      && ((old_ts & HARD_DEP)
3010 		  || (old_ts & SPECULATIVE)));
3011 
3012   if (!(current_sched_info->flags & DO_SPECULATION))
3013     {
3014       if (!LOG_LINKS (next))
3015         *ts &= ~HARD_DEP;
3016     }
3017   else
3018     {
3019       *ts &= ~SPECULATIVE & ~HARD_DEP;
3020 
3021       link = LOG_LINKS (next);
3022       if (link)
3023         {
3024           /* LOG_LINKS are maintained sorted.
3025              So if DEP_STATUS of the first dep is SPECULATIVE,
3026              then all other deps are speculative too.  */
3027           if (DEP_STATUS (link) & SPECULATIVE)
3028             {
3029               /* Now we've got NEXT with speculative deps only.
3030                  1. Look at the deps to see what we have to do.
3031                  2. Check if we can do 'todo'.  */
3032 	      *ts = DEP_STATUS (link) & SPECULATIVE;
3033               while ((link = XEXP (link, 1)))
3034 		*ts = ds_merge (*ts, DEP_STATUS (link) & SPECULATIVE);
3035 
3036 	      if (dep_weak (*ts) < spec_info->weakness_cutoff)
3037 		/* Too few points.  */
3038 		*ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3039 	    }
3040           else
3041             *ts |= HARD_DEP;
3042         }
3043     }
3044 
3045   if (*ts & HARD_DEP)
3046     gcc_assert (*ts == old_ts
3047 		&& QUEUE_INDEX (next) == QUEUE_NOWHERE);
3048   else if (current_sched_info->new_ready)
3049     *ts = current_sched_info->new_ready (next, *ts);
3050 
3051   /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then the insn might
3052      have its original pattern or a changed (speculative) one.  This is due
3053      to changing the ebb in region scheduling.
3054      * But if (old_ts & SPECULATIVE), then we are pretty sure that the insn
3055      has a speculative pattern.
3056 
3057      We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
3058      control-speculative NEXT could have been discarded by sched-rgn.c
3059      (the same case as when discarded by can_schedule_ready_p ()).  */
3060 
3061   if ((*ts & SPECULATIVE)
3062       /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3063 	 need to change anything.  */
3064       && *ts != old_ts)
3065     {
3066       int res;
3067       rtx new_pat;
3068 
3069       gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3070 
3071       res = speculate_insn (next, *ts, &new_pat);
3072 
3073       switch (res)
3074 	{
3075 	case -1:
3076 	  /* It would be nice to change DEP_STATUS of all dependences,
3077 	     which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3078 	     so we won't reanalyze anything.  */
3079 	  *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3080 	  break;
3081 
3082 	case 0:
3083 	  /* We follow the rule that every speculative insn
3084 	     has a non-null ORIG_PAT.  */
3085 	  if (!ORIG_PAT (next))
3086 	    ORIG_PAT (next) = PATTERN (next);
3087 	  break;
3088 
3089 	case 1:
3090 	  if (!ORIG_PAT (next))
3091 	    /* If we are going to overwrite the original pattern of insn,
3092 	       save it.  */
3093 	    ORIG_PAT (next) = PATTERN (next);
3094 
3095 	  change_pattern (next, new_pat);
3096 	  break;
3097 
3098 	default:
3099 	  gcc_unreachable ();
3100 	}
3101     }
3102 
3103   /* We need to restore the pattern only if (*ts == 0), because otherwise it is
3104      either correct (*ts & SPECULATIVE),
3105      or we simply don't care (*ts & HARD_DEP).  */
3106 
3107   gcc_assert (!ORIG_PAT (next)
3108 	      || !IS_SPECULATION_BRANCHY_CHECK_P (next));
3109 
3110   if (*ts & HARD_DEP)
3111     {
3112       /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3113 	 control-speculative NEXT could have been discarded by sched-rgn.c
3114 	 (the same case as when discarded by can_schedule_ready_p ()).  */
3115       /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3116 
3117       change_queue_index (next, QUEUE_NOWHERE);
3118       return -1;
3119     }
3120   else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
3121     /* We should change the pattern of every previously speculative
3122        instruction - and we determine if NEXT was speculative by using
3123        the ORIG_PAT field.  Except in one case - speculation checks have
3124        ORIG_PAT too, so skip them.  */
3125     {
3126       change_pattern (next, ORIG_PAT (next));
3127       ORIG_PAT (next) = 0;
3128     }
3129 
3130   if (sched_verbose >= 2)
3131     {
3132       int s = TODO_SPEC (next);
3133 
3134       fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3135                (*current_sched_info->print_insn) (next, 0));
3136 
3137       if (spec_info && spec_info->dump)
3138         {
3139           if (s & BEGIN_DATA)
3140             fprintf (spec_info->dump, "; data-spec;");
3141           if (s & BEGIN_CONTROL)
3142             fprintf (spec_info->dump, "; control-spec;");
3143           if (s & BE_IN_CONTROL)
3144             fprintf (spec_info->dump, "; in-control-spec;");
3145         }
3146 
3147       fprintf (sched_dump, "\n");
3148     }
3149 
3150   adjust_priority (next);
3151 
3152   return fix_tick_ready (next);
3153 }
3154 
3155 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list.  */
3156 static int
3157 fix_tick_ready (rtx next)
3158 {
3159   rtx link;
3160   int tick, delay;
3161 
3162   link = RESOLVED_DEPS (next);
3163 
3164   if (link)
3165     {
3166       int full_p;
3167 
3168       tick = INSN_TICK (next);
3169       /* If TICK is not equal to INVALID_TICK, then update
3170 	 INSN_TICK of NEXT with the most recent resolved dependence
3171 	 cost.  Otherwise, recalculate from scratch.  */
3172       full_p = tick == INVALID_TICK;
3173       do
3174         {
3175           rtx pro;
3176           int tick1;
3177 
3178           pro = XEXP (link, 0);
3179 	  gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3180 
3181           tick1 = INSN_TICK (pro) + insn_cost (pro, link, next);
3182           if (tick1 > tick)
3183             tick = tick1;
3184         }
3185       while ((link = XEXP (link, 1)) && full_p);
3186     }
3187   else
3188     tick = -1;
3189 
3190   INSN_TICK (next) = tick;
3191 
3192   delay = tick - clock_var;
3193   if (delay <= 0)
3194     delay = QUEUE_READY;
3195 
3196   change_queue_index (next, delay);
3197 
3198   return delay;
3199 }
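
/* A standalone sketch (not part of the pass) of the recurrence computed
   above when recalculating from scratch:
   tick (next) = max over deps (tick (producer) + cost (producer, next)),
   or -1 when there are no dependences.  Hypothetical names.  */
#if 0
struct toy_dep { int pro_tick; int cost; };

static int
toy_insn_tick (const struct toy_dep *deps, int n_deps)
{
  int i, tick = -1;

  for (i = 0; i < n_deps; i++)
    if (deps[i].pro_tick + deps[i].cost > tick)
      tick = deps[i].pro_tick + deps[i].cost;
  return tick;
}
#endif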
3200 
3201 /* Move NEXT to the proper queue list with (DELAY >= 1),
3202    or add it to the ready list (DELAY == QUEUE_READY),
3203    or remove it from the ready and queue lists altogether (DELAY == QUEUE_NOWHERE).  */
3204 static void
3205 change_queue_index (rtx next, int delay)
3206 {
3207   int i = QUEUE_INDEX (next);
3208 
3209   gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3210 	      && delay != 0);
3211   gcc_assert (i != QUEUE_SCHEDULED);
3212 
3213   if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3214       || (delay < 0 && delay == i))
3215     /* We have nothing to do.  */
3216     return;
3217 
3218   /* Remove NEXT from wherever it is now.  */
3219   if (i == QUEUE_READY)
3220     ready_remove_insn (next);
3221   else if (i >= 0)
3222     queue_remove (next);
3223 
3224   /* Add it to the proper place.  */
3225   if (delay == QUEUE_READY)
3226     ready_add (readyp, next, false);
3227   else if (delay >= 1)
3228     queue_insn (next, delay);
3229 
3230   if (sched_verbose >= 2)
3231     {
3232       fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3233 	       (*current_sched_info->print_insn) (next, 0));
3234 
3235       if (delay == QUEUE_READY)
3236 	fprintf (sched_dump, " into ready\n");
3237       else if (delay >= 1)
3238 	fprintf (sched_dump, " into queue with cost=%d\n", delay);
3239       else
3240 	fprintf (sched_dump, " removed from ready or queue lists\n");
3241     }
3242 }
3243 
3244 /* INSN is being scheduled.  Resolve the dependence between INSN and NEXT.  */
3245 static void
3246 resolve_dep (rtx next, rtx insn)
3247 {
3248   rtx dep;
3249 
3250   INSN_DEP_COUNT (next)--;
3251 
3252   dep = remove_list_elem (insn, &LOG_LINKS (next));
3253   XEXP (dep, 1) = RESOLVED_DEPS (next);
3254   RESOLVED_DEPS (next) = dep;
3255 
3256   gcc_assert ((INSN_DEP_COUNT (next) != 0 || !LOG_LINKS (next))
3257 	      && (LOG_LINKS (next) || INSN_DEP_COUNT (next) == 0));
3258 }
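
/* A standalone sketch (not part of the pass): resolving a dependence
   moves one list cell from the pending chain onto the head of the
   resolved chain, which is what the XEXP juggling above does once
   remove_list_elem has detached the cell.  Hypothetical types.  */
#if 0
struct toy_link { int val; struct toy_link *next; };

/* DEP has already been unlinked from the pending list; push it onto
   *RESOLVED.  */
static void
toy_push_resolved (struct toy_link *dep, struct toy_link **resolved)
{
  dep->next = *resolved;
  *resolved = dep;
}
#endif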
3259 
3260 /* Extend H_I_D data.  */
3261 static void
3262 extend_h_i_d (void)
3263 {
3264   /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
3265      pseudos which do not cross calls.  */
3266   int new_max_uid = get_max_uid() + 1;
3267 
3268   h_i_d = xrecalloc (h_i_d, new_max_uid, old_max_uid, sizeof (*h_i_d));
3269   old_max_uid = new_max_uid;
3270 
3271   if (targetm.sched.h_i_d_extended)
3272     targetm.sched.h_i_d_extended ();
3273 }
3274 
3275 /* Extend READY, READY_TRY and CHOICE_STACK arrays.
3276    N_NEW_INSNS is the number of additional elements to allocate.  */
3277 static void
3278 extend_ready (int n_new_insns)
3279 {
3280   int i;
3281 
3282   readyp->veclen = rgn_n_insns + n_new_insns + 1 + issue_rate;
3283   readyp->vec = XRESIZEVEC (rtx, readyp->vec, readyp->veclen);
3284 
3285   ready_try = xrecalloc (ready_try, rgn_n_insns + n_new_insns + 1,
3286 			 rgn_n_insns + 1, sizeof (char));
3287 
3288   rgn_n_insns += n_new_insns;
3289 
3290   choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
3291 			     rgn_n_insns + 1);
3292 
3293   for (i = rgn_n_insns; n_new_insns--; i--)
3294     choice_stack[i].state = xmalloc (dfa_state_size);
3295 }
3296 
3297 /* Extend global scheduler structures (those that live across calls to
3298    schedule_block) to include information about the just emitted INSN.  */
3299 static void
3300 extend_global (rtx insn)
3301 {
3302   gcc_assert (INSN_P (insn));
3303   /* These structures have scheduler scope.  */
3304   extend_h_i_d ();
3305   init_h_i_d (insn);
3306 
3307   extend_dependency_caches (1, 0);
3308 }
3309 
3310 /* Extend global and local scheduler structures to include information
3311    about the just emitted INSN.  */
3312 static void
3313 extend_all (rtx insn)
3314 {
3315   extend_global (insn);
3316 
3317   /* These structures have block scope.  */
3318   extend_ready (1);
3319 
3320   (*current_sched_info->add_remove_insn) (insn, 0);
3321 }
3322 
3323 /* Initialize h_i_d entry of the new INSN with default values.
3324    Values that are not explicitly initialized here hold zero.  */
3325 static void
3326 init_h_i_d (rtx insn)
3327 {
3328   INSN_LUID (insn) = luid++;
3329   INSN_COST (insn) = -1;
3330   TODO_SPEC (insn) = HARD_DEP;
3331   QUEUE_INDEX (insn) = QUEUE_NOWHERE;
3332   INSN_TICK (insn) = INVALID_TICK;
3333   INTER_TICK (insn) = INVALID_TICK;
3334   find_insn_reg_weight1 (insn);
3335 }
3336 
3337 /* Generates recovery code for INSN.  */
3338 static void
3339 generate_recovery_code (rtx insn)
3340 {
3341   if (TODO_SPEC (insn) & BEGIN_SPEC)
3342     begin_speculative_block (insn);
3343 
3344   /* Here we have an insn with no dependencies on
3345      instructions other than CHECK_SPEC ones.  */
3346 
3347   if (TODO_SPEC (insn) & BE_IN_SPEC)
3348     add_to_speculative_block (insn);
3349 }
3350 
3351 /* Helper function.
3352    Tries to add speculative dependencies of type FS between instructions
3353    in the LINK list and TWIN.  */
3354 static void
3355 process_insn_depend_be_in_spec (rtx link, rtx twin, ds_t fs)
3356 {
3357   for (; link; link = XEXP (link, 1))
3358     {
3359       ds_t ds;
3360       rtx consumer;
3361 
3362       consumer = XEXP (link, 0);
3363 
3364       ds = DEP_STATUS (link);
3365 
3366       if (/* If we want to create speculative dep.  */
3367 	  fs
3368 	  /* And we can do that because this is a true dep.  */
3369 	  && (ds & DEP_TYPES) == DEP_TRUE)
3370 	{
3371 	  gcc_assert (!(ds & BE_IN_SPEC));
3372 
3373 	  if (/* If this dep can be overcome with 'begin speculation'.  */
3374 	      ds & BEGIN_SPEC)
3375 	    /* Then we have a choice: keep the dep 'begin speculative'
3376 	       or transform it into 'be in speculative'.  */
3377 	    {
3378 	      if (/* In try_ready we assert that if insn once became ready
3379 		     it can be removed from the ready (or queue) list only
3380 		     due to a backend decision.  Hence we can't let the
3381 		     probability of the speculative dep decrease.  */
3382 		  dep_weak (ds) <= dep_weak (fs))
3383 		/* Transform it to be in speculative.  */
3384 		ds = (ds & ~BEGIN_SPEC) | fs;
3385 	    }
3386 	  else
3387 	    /* Mark the dep as 'be in speculative'.  */
3388 	    ds |= fs;
3389 	}
3390 
3391       add_back_forw_dep (consumer, twin, REG_NOTE_KIND (link), ds);
3392     }
3393 }
3394 
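/* A minimal sketch of the transformation rule above, kept out of the
   build with #if 0.  The helper name example_transform_ds is
   hypothetical; it only mirrors the branch structure of
   process_insn_depend_be_in_spec for a single dependence status DS
   and future speculation status FS.  */
#if 0
static ds_t
example_transform_ds (ds_t ds, ds_t fs)
{
  if (fs && (ds & DEP_TYPES) == DEP_TRUE)
    {
      if (ds & BEGIN_SPEC)
	{
	  /* Transform only if this doesn't decrease the probability
	     of speculation success (see the comment above).  */
	  if (dep_weak (ds) <= dep_weak (fs))
	    ds = (ds & ~BEGIN_SPEC) | fs;
	}
      else
	ds |= fs;
    }
  return ds;
}
#endif
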
3395 /* Generates recovery code for BEGIN speculative INSN.  */
3396 static void
3397 begin_speculative_block (rtx insn)
3398 {
3399   if (TODO_SPEC (insn) & BEGIN_DATA)
3400     nr_begin_data++;
3401   if (TODO_SPEC (insn) & BEGIN_CONTROL)
3402     nr_begin_control++;
3403 
3404   create_check_block_twin (insn, false);
3405 
3406   TODO_SPEC (insn) &= ~BEGIN_SPEC;
3407 }
3408 
3409 /* Generates recovery code for BE_IN speculative INSN.  */
3410 static void
3411 add_to_speculative_block (rtx insn)
3412 {
3413   ds_t ts;
3414   rtx link, twins = NULL;
3415 
3416   ts = TODO_SPEC (insn);
3417   gcc_assert (!(ts & ~BE_IN_SPEC));
3418 
3419   if (ts & BE_IN_DATA)
3420     nr_be_in_data++;
3421   if (ts & BE_IN_CONTROL)
3422     nr_be_in_control++;
3423 
3424   TODO_SPEC (insn) &= ~BE_IN_SPEC;
3425   gcc_assert (!TODO_SPEC (insn));
3426 
3427   DONE_SPEC (insn) |= ts;
3428 
3429   /* First we convert all simple checks to branchy ones.  */
3430   for (link = LOG_LINKS (insn); link;)
3431     {
3432       rtx check;
3433 
3434       check = XEXP (link, 0);
3435 
3436       if (IS_SPECULATION_SIMPLE_CHECK_P (check))
3437 	{
3438 	  create_check_block_twin (check, true);
3439 	  link = LOG_LINKS (insn);
3440 	}
3441       else
3442 	link = XEXP (link, 1);
3443     }
3444 
3445   clear_priorities (insn);
3446 
3447   do
3448     {
3449       rtx link, check, twin;
3450       basic_block rec;
3451 
3452       link = LOG_LINKS (insn);
3453       gcc_assert (!(DEP_STATUS (link) & BEGIN_SPEC)
3454 		  && (DEP_STATUS (link) & BE_IN_SPEC)
3455 		  && (DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
3456 
3457       check = XEXP (link, 0);
3458 
3459       gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
3460 		  && QUEUE_INDEX (check) == QUEUE_NOWHERE);
3461 
3462       rec = BLOCK_FOR_INSN (check);
3463 
3464       twin = emit_insn_before (copy_rtx (PATTERN (insn)), BB_END (rec));
3465       extend_global (twin);
3466 
3467       RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
3468 
3469       if (sched_verbose && spec_info->dump)
3470         /* INSN_BB (insn) isn't determined for twin insns yet.
3471            So we can't use current_sched_info->print_insn.  */
3472         fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3473                  INSN_UID (twin), rec->index);
3474 
3475       twins = alloc_INSN_LIST (twin, twins);
3476 
3477       /* Add dependences between TWIN and all appropriate
3478 	 instructions from REC.  */
3479       do
3480 	{
3481 	  add_back_forw_dep (twin, check, REG_DEP_TRUE, DEP_TRUE);
3482 
3483 	  do
3484 	    {
3485 	      link = XEXP (link, 1);
3486 	      if (link)
3487 		{
3488 		  check = XEXP (link, 0);
3489 		  if (BLOCK_FOR_INSN (check) == rec)
3490 		    break;
3491 		}
3492 	      else
3493 		break;
3494 	    }
3495 	  while (1);
3496 	}
3497       while (link);
3498 
3499       process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, ts);
3500 
3501       for (link = LOG_LINKS (insn); link;)
3502 	{
3503 	  check = XEXP (link, 0);
3504 
3505 	  if (BLOCK_FOR_INSN (check) == rec)
3506 	    {
3507 	      delete_back_forw_dep (insn, check);
3508 	      link = LOG_LINKS (insn);
3509 	    }
3510 	  else
3511 	    link = XEXP (link, 1);
3512 	}
3513     }
3514   while (LOG_LINKS (insn));
3515 
3516   /* We can't add the dependence between insn and twin earlier because
3517      that would make twin appear in INSN_DEPEND (insn).  */
3518   while (twins)
3519     {
3520       rtx twin;
3521 
3522       twin = XEXP (twins, 0);
3523       calc_priorities (twin);
3524       add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3525 
3526       twin = XEXP (twins, 1);
3527       free_INSN_LIST_node (twins);
3528       twins = twin;
3529     }
3530 }
3531 
3532 /* Extends the array pointed to by P and fills the new part with zeros.  */
3533 void *
3534 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
3535 {
3536   gcc_assert (new_nmemb >= old_nmemb);
3537   p = XRESIZEVAR (void, p, new_nmemb * size);
3538   memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
3539   return p;
3540 }
3541 
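/* A minimal usage sketch of xrecalloc, kept out of the build with
   #if 0; the variable name v is hypothetical.  */
#if 0
{
  int *v = xcalloc (4, sizeof (int));

  /* Grow from 4 to 8 elements; the new tail reads as zero.  */
  v = xrecalloc (v, 8, 4, sizeof (*v));
  gcc_assert (v[4] == 0 && v[7] == 0);
  free (v);
}
#endif
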
3542 /* Return the probability of speculation success for the speculation
3543    status DS.  */
3544 static dw_t
3545 dep_weak (ds_t ds)
3546 {
3547   ds_t res = 1, dt;
3548   int n = 0;
3549 
3550   dt = FIRST_SPEC_TYPE;
3551   do
3552     {
3553       if (ds & dt)
3554 	{
3555 	  res *= (ds_t) get_dep_weak (ds, dt);
3556 	  n++;
3557 	}
3558 
3559       if (dt == LAST_SPEC_TYPE)
3560 	break;
3561       dt <<= SPEC_TYPE_SHIFT;
3562     }
3563   while (1);
3564 
3565   gcc_assert (n);
3566   while (--n)
3567     res /= MAX_DEP_WEAK;
3568 
3569   if (res < MIN_DEP_WEAK)
3570     res = MIN_DEP_WEAK;
3571 
3572   gcc_assert (res <= MAX_DEP_WEAK);
3573 
3574   return (dw_t) res;
3575 }
3576 
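/* A worked example of the computation above, with assumed weakness
   values (not taken from any real target): if DS carries two
   speculation types with weaknesses w1 = 3 * MAX_DEP_WEAK / 4 and
   w2 = MAX_DEP_WEAK / 2, the loop accumulates res = w1 * w2 with
   n = 2, and the normalization step divides once by MAX_DEP_WEAK:

       res = (w1 * w2) / MAX_DEP_WEAK = 3 * MAX_DEP_WEAK / 8.

   That is, weaknesses act as fixed-point probabilities scaled by
   MAX_DEP_WEAK, and dep_weak returns their product, clamped to
   [MIN_DEP_WEAK, MAX_DEP_WEAK].  */
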
3577 /* Helper function.
3578    Find the fallthru edge from PRED.  */
3579 static edge
3580 find_fallthru_edge (basic_block pred)
3581 {
3582   edge e;
3583   edge_iterator ei;
3584   basic_block succ;
3585 
3586   succ = pred->next_bb;
3587   gcc_assert (succ->prev_bb == pred);
3588 
3589   if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
3590     {
3591       FOR_EACH_EDGE (e, ei, pred->succs)
3592 	if (e->flags & EDGE_FALLTHRU)
3593 	  {
3594 	    gcc_assert (e->dest == succ);
3595 	    return e;
3596 	  }
3597     }
3598   else
3599     {
3600       FOR_EACH_EDGE (e, ei, succ->preds)
3601 	if (e->flags & EDGE_FALLTHRU)
3602 	  {
3603 	    gcc_assert (e->src == pred);
3604 	    return e;
3605 	  }
3606     }
3607 
3608   return NULL;
3609 }
3610 
3611 /* Initialize the BEFORE_RECOVERY variable.  */
3612 static void
3613 init_before_recovery (void)
3614 {
3615   basic_block last;
3616   edge e;
3617 
3618   last = EXIT_BLOCK_PTR->prev_bb;
3619   e = find_fallthru_edge (last);
3620 
3621   if (e)
3622     {
3623       /* We create two basic blocks:
3624          1. A single-instruction block that is inserted right after E->SRC
3625          and jumps to
3626          2. An empty block placed right before EXIT_BLOCK.
3627          Recovery blocks will be emitted between these two blocks.  */
3628 
3629       basic_block single, empty;
3630       rtx x, label;
3631 
3632       single = create_empty_bb (last);
3633       empty = create_empty_bb (single);
3634 
3635       single->count = last->count;
3636       empty->count = last->count;
3637       single->frequency = last->frequency;
3638       empty->frequency = last->frequency;
3639       BB_COPY_PARTITION (single, last);
3640       BB_COPY_PARTITION (empty, last);
3641 
3642       redirect_edge_succ (e, single);
3643       make_single_succ_edge (single, empty, 0);
3644       make_single_succ_edge (empty, EXIT_BLOCK_PTR,
3645 			     EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
3646 
3647       label = block_label (empty);
3648       x = emit_jump_insn_after (gen_jump (label), BB_END (single));
3649       JUMP_LABEL (x) = label;
3650       LABEL_NUSES (label)++;
3651       extend_global (x);
3652 
3653       emit_barrier_after (x);
3654 
3655       add_block (empty, 0);
3656       add_block (single, 0);
3657 
3658       before_recovery = single;
3659 
3660       if (sched_verbose >= 2 && spec_info->dump)
3661         fprintf (spec_info->dump,
3662 		 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
3663                  last->index, single->index, empty->index);
3664     }
3665   else
3666     before_recovery = last;
3667 }
3668 
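/* The CFG shape produced above, sketched; SINGLE and EMPTY are the
   two blocks from the comment inside init_before_recovery, and `->>'
   marks a fallthru edge, as in the dump printed below:

       LAST ->> SINGLE --jump--> EMPTY ->> EXIT

   Recovery blocks are then emitted between SINGLE and EMPTY, so they
   never break a fallthru path into the exit block.  */
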
3669 /* Returns a new recovery block.  */
3670 static basic_block
3671 create_recovery_block (void)
3672 {
3673   rtx label;
3674   rtx barrier;
3675   basic_block rec;
3676 
3677   added_recovery_block_p = true;
3678 
3679   if (!before_recovery)
3680     init_before_recovery ();
3681 
3682   barrier = get_last_bb_insn (before_recovery);
3683   gcc_assert (BARRIER_P (barrier));
3684 
3685   label = emit_label_after (gen_label_rtx (), barrier);
3686 
3687   rec = create_basic_block (label, label, before_recovery);
3688 
3689   /* Recovery blocks always end with an unconditional jump.  */
3690   emit_barrier_after (BB_END (rec));
3691 
3692   if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
3693     BB_SET_PARTITION (rec, BB_COLD_PARTITION);
3694 
3695   if (sched_verbose && spec_info->dump)
3696     fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
3697              rec->index);
3698 
3699   before_recovery = rec;
3700 
3701   return rec;
3702 }
3703 
3704 /* This function creates recovery code for INSN.  If MUTATE_P is nonzero,
3705    INSN is a simple check that should be converted to a branchy one.  */
3706 static void
3707 create_check_block_twin (rtx insn, bool mutate_p)
3708 {
3709   basic_block rec;
3710   rtx label, check, twin, link;
3711   ds_t fs;
3712 
3713   gcc_assert (ORIG_PAT (insn)
3714 	      && (!mutate_p
3715 		  || (IS_SPECULATION_SIMPLE_CHECK_P (insn)
3716 		      && !(TODO_SPEC (insn) & SPECULATIVE))));
3717 
3718   /* Create recovery block.  */
3719   if (mutate_p || targetm.sched.needs_block_p (insn))
3720     {
3721       rec = create_recovery_block ();
3722       label = BB_HEAD (rec);
3723     }
3724   else
3725     {
3726       rec = EXIT_BLOCK_PTR;
3727       label = 0;
3728     }
3729 
3730   /* Emit CHECK.  */
3731   check = targetm.sched.gen_check (insn, label, mutate_p);
3732 
3733   if (rec != EXIT_BLOCK_PTR)
3734     {
3735       /* To have mem_reg alive at the beginning of second_bb,
3736 	 we emit the check BEFORE insn, so that after splitting,
3737 	 insn will be at the beginning of second_bb, which will
3738 	 provide us with the correct life information.  */
3739       check = emit_jump_insn_before (check, insn);
3740       JUMP_LABEL (check) = label;
3741       LABEL_NUSES (label)++;
3742     }
3743   else
3744     check = emit_insn_before (check, insn);
3745 
3746   /* Extend data structures.  */
3747   extend_all (check);
3748   RECOVERY_BLOCK (check) = rec;
3749 
3750   if (sched_verbose && spec_info->dump)
3751     fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
3752              (*current_sched_info->print_insn) (check, 0));
3753 
3754   gcc_assert (ORIG_PAT (insn));
3755 
3756   /* Initialize TWIN (twin is a duplicate of the original instruction
3757      in the recovery block).  */
3758   if (rec != EXIT_BLOCK_PTR)
3759     {
3760       rtx link;
3761 
3762       for (link = RESOLVED_DEPS (insn); link; link = XEXP (link, 1))
3763 	if (DEP_STATUS (link) & DEP_OUTPUT)
3764 	  {
3765 	    RESOLVED_DEPS (check) =
3766 	      alloc_DEPS_LIST (XEXP (link, 0), RESOLVED_DEPS (check), DEP_TRUE);
3767 	    PUT_REG_NOTE_KIND (RESOLVED_DEPS (check), REG_DEP_TRUE);
3768 	  }
3769 
3770       twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
3771       extend_global (twin);
3772 
3773       if (sched_verbose && spec_info->dump)
3774 	/* INSN_BB (insn) isn't determined for twin insns yet.
3775 	   So we can't use current_sched_info->print_insn.  */
3776 	fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3777 		 INSN_UID (twin), rec->index);
3778     }
3779   else
3780     {
3781       ORIG_PAT (check) = ORIG_PAT (insn);
3782       HAS_INTERNAL_DEP (check) = 1;
3783       twin = check;
3784       /* ??? We probably should change all OUTPUT dependencies to
3785 	 (TRUE | OUTPUT).  */
3786     }
3787 
3788   RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
3789 
3790   if (rec != EXIT_BLOCK_PTR)
3791     /* In case of branchy check, fix CFG.  */
3792     {
3793       basic_block first_bb, second_bb;
3794       rtx jump;
3795       edge e;
3796       int edge_flags;
3797 
3798       first_bb = BLOCK_FOR_INSN (check);
3799       e = split_block (first_bb, check);
3800       /* split_block emits a note if *check == BB_END.  Probably it
3801 	 is better to rip that note off.  */
3802       gcc_assert (e->src == first_bb);
3803       second_bb = e->dest;
3804 
3805       /* This fixes the incoming edge.  */
3806       /* ??? Which other flags should be specified?  */
3807       if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
3808 	/* Partition type is the same, if it is "unpartitioned".  */
3809 	edge_flags = EDGE_CROSSING;
3810       else
3811 	edge_flags = 0;
3812 
3813       e = make_edge (first_bb, rec, edge_flags);
3814 
3815       add_block (second_bb, first_bb);
3816 
3817       gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (second_bb)));
3818       label = block_label (second_bb);
3819       jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
3820       JUMP_LABEL (jump) = label;
3821       LABEL_NUSES (label)++;
3822       extend_global (jump);
3823 
3824       if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
3825 	/* The partition types are the same if both are "unpartitioned".  */
3826 	{
3827 	  /* Rewritten from cfgrtl.c.  */
3828 	  if (flag_reorder_blocks_and_partition
3829 	      && targetm.have_named_sections
3830 	      /*&& !any_condjump_p (jump)*/)
3831 	    /* any_condjump_p (jump) == false.
3832 	       We don't need the same note for the check because
3833 	       any_condjump_p (check) == true.  */
3834 	    {
3835 	      REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
3836 						    NULL_RTX,
3837 						    REG_NOTES (jump));
3838 	    }
3839 	  edge_flags = EDGE_CROSSING;
3840 	}
3841       else
3842 	edge_flags = 0;
3843 
3844       make_single_succ_edge (rec, second_bb, edge_flags);
3845 
3846       add_block (rec, EXIT_BLOCK_PTR);
3847     }
3848 
3849   /* Move backward dependences from INSN to CHECK and
3850      move forward dependences from INSN to TWIN.  */
3851   for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
3852     {
3853       ds_t ds;
3854 
3855       /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
3856 	 check --TRUE--> producer  ??? or ANTI ???
3857 	 twin  --TRUE--> producer
3858 	 twin  --ANTI--> check
3859 
3860 	 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
3861 	 check --ANTI--> producer
3862 	 twin  --ANTI--> producer
3863 	 twin  --ANTI--> check
3864 
3865 	 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
3866 	 check ~~TRUE~~> producer
3867 	 twin  ~~TRUE~~> producer
3868 	 twin  --ANTI--> check  */
3869 
3870       ds = DEP_STATUS (link);
3871 
3872       if (ds & BEGIN_SPEC)
3873 	{
3874 	  gcc_assert (!mutate_p);
3875 	  ds &= ~BEGIN_SPEC;
3876 	}
3877 
3878       if (rec != EXIT_BLOCK_PTR)
3879 	{
3880 	  add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3881 	  add_back_forw_dep (twin, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3882 	}
3883       else
3884 	add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3885     }
3886 
3887   for (link = LOG_LINKS (insn); link;)
3888     if ((DEP_STATUS (link) & BEGIN_SPEC)
3889 	|| mutate_p)
3890       /* We can delete this dep only if we totally overcome it with
3891 	 BEGIN_SPECULATION.  */
3892       {
3893         delete_back_forw_dep (insn, XEXP (link, 0));
3894         link = LOG_LINKS (insn);
3895       }
3896     else
3897       link = XEXP (link, 1);
3898 
3899   fs = 0;
3900 
3901   /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
3902      here.  */
3903 
3904   gcc_assert (!DONE_SPEC (insn));
3905 
3906   if (!mutate_p)
3907     {
3908       ds_t ts = TODO_SPEC (insn);
3909 
3910       DONE_SPEC (insn) = ts & BEGIN_SPEC;
3911       CHECK_SPEC (check) = ts & BEGIN_SPEC;
3912 
3913       if (ts & BEGIN_DATA)
3914 	fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
3915       if (ts & BEGIN_CONTROL)
3916 	fs = set_dep_weak (fs, BE_IN_CONTROL, get_dep_weak (ts, BEGIN_CONTROL));
3917     }
3918   else
3919     CHECK_SPEC (check) = CHECK_SPEC (insn);
3920 
3921   /* Future speculations: call the helper.  */
3922   process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, fs);
3923 
3924   if (rec != EXIT_BLOCK_PTR)
3925     {
3926       /* Which types of dependencies we should use here is,
3927 	 generally, a machine-dependent question...  But, for now,
3928 	 it is not.  */
3929 
3930       if (!mutate_p)
3931 	{
3932 	  add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE);
3933 	  add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3934 	}
3935       else
3936 	{
3937 	  if (spec_info->dump)
3938 	    fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
3939 		     (*current_sched_info->print_insn) (insn, 0));
3940 
3941 	  for (link = INSN_DEPEND (insn); link; link = INSN_DEPEND (insn))
3942 	    delete_back_forw_dep (XEXP (link, 0), insn);
3943 
3944 	  if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
3945 	    try_ready (check);
3946 
3947 	  sched_remove_insn (insn);
3948 	}
3949 
3950       add_back_forw_dep (twin, check, REG_DEP_ANTI, DEP_ANTI);
3951     }
3952   else
3953     add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
3954 
3955   if (!mutate_p)
3956     /* Fix priorities.  If MUTATE_P is nonzero, this is not necessary,
3957        because it'll be done later in add_to_speculative_block.  */
3958     {
3959       clear_priorities (twin);
3960       calc_priorities (twin);
3961     }
3962 }
3963 
3964 /* Removes dependencies between instructions in the recovery block REC
3965    and ordinary region instructions.  It keeps inner dependences so it
3966    won't be necessary to recompute them.  */
3967 static void
3968 fix_recovery_deps (basic_block rec)
3969 {
3970   rtx note, insn, link, jump, ready_list = 0;
3971   bitmap_head in_ready;
3972 
3973   bitmap_initialize (&in_ready, 0);
3974 
3975   /* NOTE - a basic block note.  */
3976   note = NEXT_INSN (BB_HEAD (rec));
3977   gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
3978   insn = BB_END (rec);
3979   gcc_assert (JUMP_P (insn));
3980   insn = PREV_INSN (insn);
3981 
3982   do
3983     {
3984       for (link = INSN_DEPEND (insn); link;)
3985 	{
3986 	  rtx consumer;
3987 
3988 	  consumer = XEXP (link, 0);
3989 
3990 	  if (BLOCK_FOR_INSN (consumer) != rec)
3991 	    {
3992 	      delete_back_forw_dep (consumer, insn);
3993 
3994 	      if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
3995 		{
3996 		  ready_list = alloc_INSN_LIST (consumer, ready_list);
3997 		  bitmap_set_bit (&in_ready, INSN_LUID (consumer));
3998 		}
3999 
4000 	      link = INSN_DEPEND (insn);
4001 	    }
4002 	  else
4003 	    {
4004 	      gcc_assert ((DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
4005 
4006 	      link = XEXP (link, 1);
4007 	    }
4008 	}
4009 
4010       insn = PREV_INSN (insn);
4011     }
4012   while (insn != note);
4013 
4014   bitmap_clear (&in_ready);
4015 
4016   /* Try to add instructions to the ready or queue list.  */
4017   for (link = ready_list; link; link = XEXP (link, 1))
4018     try_ready (XEXP (link, 0));
4019   free_INSN_LIST_list (&ready_list);
4020 
4021   /* Fix the jump's dependences.  */
4022   insn = BB_HEAD (rec);
4023   jump = BB_END (rec);
4024 
4025   gcc_assert (LABEL_P (insn));
4026   insn = NEXT_INSN (insn);
4027 
4028   gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4029   add_jump_dependencies (insn, jump);
4030 }
4031 
4032 /* The function saves line notes at the beginning of block B.  */
4033 static void
4034 associate_line_notes_with_blocks (basic_block b)
4035 {
4036   rtx line;
4037 
4038   for (line = BB_HEAD (b); line; line = PREV_INSN (line))
4039     if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
4040       {
4041         line_note_head[b->index] = line;
4042         break;
4043       }
4044   /* Do a forward search as well, since we won't get to see the first
4045      notes in a basic block.  */
4046   for (line = BB_HEAD (b); line; line = NEXT_INSN (line))
4047     {
4048       if (INSN_P (line))
4049         break;
4050       if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
4051         line_note_head[b->index] = line;
4052     }
4053 }
4054 
4055 /* Changes the pattern of INSN to NEW_PAT.  */
4056 static void
4057 change_pattern (rtx insn, rtx new_pat)
4058 {
4059   int t;
4060 
4061   t = validate_change (insn, &PATTERN (insn), new_pat, 0);
4062   gcc_assert (t);
4063   /* Invalidate INSN_COST, so it'll be recalculated.  */
4064   INSN_COST (insn) = -1;
4065   /* Invalidate INSN_TICK, so it'll be recalculated.  */
4066   INSN_TICK (insn) = INVALID_TICK;
4067   dfa_clear_single_insn_cache (insn);
4068 }
4069 
4070 
4071 /* Return -1, if INSN can't be speculated,
4072    0, if for speculation with REQUEST mode it is OK to use the
4073    current instruction pattern,
4074    1, if the pattern needs to be changed to *NEW_PAT to be speculative.  */
4075 static int
4076 speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4077 {
4078   gcc_assert (current_sched_info->flags & DO_SPECULATION
4079               && (request & SPECULATIVE));
4080 
4081   if (!NONJUMP_INSN_P (insn)
4082       || HAS_INTERNAL_DEP (insn)
4083       || SCHED_GROUP_P (insn)
4084       || side_effects_p (PATTERN (insn))
4085       || (request & spec_info->mask) != request)
4086     return -1;
4087 
4088   gcc_assert (!IS_SPECULATION_CHECK_P (insn));
4089 
4090   if (request & BE_IN_SPEC)
4091     {
4092       if (may_trap_p (PATTERN (insn)))
4093         return -1;
4094 
4095       if (!(request & BEGIN_SPEC))
4096         return 0;
4097     }
4098 
4099   return targetm.sched.speculate_insn (insn, request & BEGIN_SPEC, new_pat);
4100 }
4101 
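/* A minimal caller sketch for the protocol above, kept out of the
   build with #if 0; insn and request stand for an instruction and a
   speculation request already at hand.  */
#if 0
{
  rtx new_pat;

  switch (speculate_insn (insn, request, &new_pat))
    {
    case -1:
      /* INSN can't be speculated with REQUEST.  */
      break;
    case 0:
      /* The current pattern is already OK.  */
      break;
    case 1:
      /* Install the speculative pattern.  */
      change_pattern (insn, new_pat);
      break;
    default:
      gcc_unreachable ();
    }
}
#endif
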
4102 /* Print some information about block BB, which starts with HEAD and
4103    ends with TAIL, before scheduling it.
4104    I is zero if the scheduler is about to start with a fresh ebb.  */
4105 static void
4106 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4107 {
4108   if (!i)
4109     fprintf (sched_dump,
4110 	     ";;   ======================================================\n");
4111   else
4112     fprintf (sched_dump,
4113 	     ";;   =====================ADVANCING TO=====================\n");
4114   fprintf (sched_dump,
4115 	   ";;   -- basic block %d from %d to %d -- %s reload\n",
4116 	   bb->index, INSN_UID (head), INSN_UID (tail),
4117 	   (reload_completed ? "after" : "before"));
4118   fprintf (sched_dump,
4119 	   ";;   ======================================================\n");
4120   fprintf (sched_dump, "\n");
4121 }
4122 
4123 /* Unlink basic block notes and labels and save them, so they
4124    can be easily restored.  We unlink basic block notes in EBB to
4125    provide back-compatibility with the previous code, as target backends
4126    assume that there'll be only instructions between
4127    current_sched_info->{head and tail}.  We restore these notes as soon
4128    as we can.
4129    FIRST (LAST) is the first (last) basic block in the ebb.
4130    NB: In the usual case (FIRST == LAST) nothing is really done.  */
4131 void
4132 unlink_bb_notes (basic_block first, basic_block last)
4133 {
4134   /* We DON'T unlink basic block notes of the first block in the ebb.  */
4135   if (first == last)
4136     return;
4137 
4138   bb_header = xmalloc (last_basic_block * sizeof (*bb_header));
4139 
4140   /* Make a sentinel.  */
4141   if (last->next_bb != EXIT_BLOCK_PTR)
4142     bb_header[last->next_bb->index] = 0;
4143 
4144   first = first->next_bb;
4145   do
4146     {
4147       rtx prev, label, note, next;
4148 
4149       label = BB_HEAD (last);
4150       if (LABEL_P (label))
4151 	note = NEXT_INSN (label);
4152       else
4153 	note = label;
4154       gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4155 
4156       prev = PREV_INSN (label);
4157       next = NEXT_INSN (note);
4158       gcc_assert (prev && next);
4159 
4160       NEXT_INSN (prev) = next;
4161       PREV_INSN (next) = prev;
4162 
4163       bb_header[last->index] = label;
4164 
4165       if (last == first)
4166 	break;
4167 
4168       last = last->prev_bb;
4169     }
4170   while (1);
4171 }
4172 
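/* What the splicing above does to the insn chain, sketched for one
   block (only the PREV/NEXT links change; the label/note pair stays
   reachable through bb_header so restore_bb_notes can put it back):

       before:  prev <-> label <-> note <-> next
       after:   prev <-> next      (bb_header[bb->index] = label)  */
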
4173 /* Restore basic block notes.
4174    FIRST is the first basic block in the ebb.  */
4175 static void
4176 restore_bb_notes (basic_block first)
4177 {
4178   if (!bb_header)
4179     return;
4180 
4181   /* We DON'T unlink basic block notes of the first block in the ebb.  */
4182   first = first->next_bb;
4183   /* Remember: FIRST is actually the second basic block in the ebb.  */
4184 
4185   while (first != EXIT_BLOCK_PTR
4186 	 && bb_header[first->index])
4187     {
4188       rtx prev, label, note, next;
4189 
4190       label = bb_header[first->index];
4191       prev = PREV_INSN (label);
4192       next = NEXT_INSN (prev);
4193 
4194       if (LABEL_P (label))
4195 	note = NEXT_INSN (label);
4196       else
4197 	note = label;
4198       gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4199 
4200       bb_header[first->index] = 0;
4201 
4202       NEXT_INSN (prev) = label;
4203       NEXT_INSN (note) = next;
4204       PREV_INSN (next) = note;
4205 
4206       first = first->next_bb;
4207     }
4208 
4209   free (bb_header);
4210   bb_header = 0;
4211 }
4212 
4213 /* Extend per basic block data structures of the scheduler.
4214    If BB is NULL, initialize structures for the whole CFG.
4215    Otherwise, initialize them for the just created BB.  */
4216 static void
4217 extend_bb (basic_block bb)
4218 {
4219   rtx insn;
4220 
4221   if (write_symbols != NO_DEBUG)
4222     {
4223       /* Save-line-note-head:
4224          Determine the line-number at the start of each basic block.
4225          This must be computed and saved now, because after a basic block's
4226          predecessor has been scheduled, it is impossible to accurately
4227          determine the correct line number for the first insn of the block.  */
4228       line_note_head = xrecalloc (line_note_head, last_basic_block,
4229 				  old_last_basic_block,
4230 				  sizeof (*line_note_head));
4231 
4232       if (bb)
4233 	associate_line_notes_with_blocks (bb);
4234       else
4235 	FOR_EACH_BB (bb)
4236 	  associate_line_notes_with_blocks (bb);
4237     }
4238 
4239   old_last_basic_block = last_basic_block;
4240 
4241   if (current_sched_info->flags & USE_GLAT)
4242     {
4243       glat_start = xrealloc (glat_start,
4244                              last_basic_block * sizeof (*glat_start));
4245       glat_end = xrealloc (glat_end, last_basic_block * sizeof (*glat_end));
4246     }
4247 
4248   /* The following is done to keep current_sched_info->next_tail non-null.  */
4249 
4250   insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4251   if (NEXT_INSN (insn) == 0
4252       || (!NOTE_P (insn)
4253 	  && !LABEL_P (insn)
4254 	  /* Don't emit a NOTE if it would end up before a BARRIER.  */
4255 	  && !BARRIER_P (NEXT_INSN (insn))))
4256     {
4257       emit_note_after (NOTE_INSN_DELETED, insn);
4258       /* Make insn appear outside the BB.  */
4259       BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
4260     }
4261 }
4262 
4263 /* Add a basic block BB to extended basic block EBB.
4264    If EBB is EXIT_BLOCK_PTR, then BB is a recovery block.
4265    If EBB is NULL, then BB should be a new region.  */
4266 void
4267 add_block (basic_block bb, basic_block ebb)
4268 {
4269   gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO
4270 	      && bb->il.rtl->global_live_at_start == 0
4271 	      && bb->il.rtl->global_live_at_end == 0);
4272 
4273   extend_bb (bb);
4274 
4275   glat_start[bb->index] = 0;
4276   glat_end[bb->index] = 0;
4277 
4278   if (current_sched_info->add_block)
4279     /* This changes only data structures of the front-end.  */
4280     current_sched_info->add_block (bb, ebb);
4281 }
4282 
4283 /* Helper function.
4284    Fix CFG after both in- and inter-block movement of
4285    control_flow_insn_p JUMP.  */
4286 static void
4287 fix_jump_move (rtx jump)
4288 {
4289   basic_block bb, jump_bb, jump_bb_next;
4290 
4291   bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4292   jump_bb = BLOCK_FOR_INSN (jump);
4293   jump_bb_next = jump_bb->next_bb;
4294 
4295   gcc_assert (current_sched_info->flags & SCHED_EBB
4296 	      || IS_SPECULATION_BRANCHY_CHECK_P (jump));
4297 
4298   if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
4299     /* If jump_bb_next is not empty.  */
4300     BB_END (jump_bb) = BB_END (jump_bb_next);
4301 
4302   if (BB_END (bb) != PREV_INSN (jump))
4303     /* Then there are instructions after jump that should be placed
4304        into jump_bb_next.  */
4305     BB_END (jump_bb_next) = BB_END (bb);
4306   else
4307     /* Otherwise jump_bb_next is empty.  */
4308     BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
4309 
4310   /* To make assertion in move_insn happy.  */
4311   BB_END (bb) = PREV_INSN (jump);
4312 
4313   update_bb_for_insn (jump_bb_next);
4314 }
4315 
4316 /* Fix CFG after interblock movement of control_flow_insn_p JUMP.  */
4317 static void
4318 move_block_after_check (rtx jump)
4319 {
4320   basic_block bb, jump_bb, jump_bb_next;
4321   VEC(edge,gc) *t;
4322 
4323   bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4324   jump_bb = BLOCK_FOR_INSN (jump);
4325   jump_bb_next = jump_bb->next_bb;
4326 
4327   update_bb_for_insn (jump_bb);
4328 
4329   gcc_assert (IS_SPECULATION_CHECK_P (jump)
4330 	      || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
4331 
4332   unlink_block (jump_bb_next);
4333   link_block (jump_bb_next, bb);
4334 
4335   t = bb->succs;
4336   bb->succs = 0;
4337   move_succs (&(jump_bb->succs), bb);
4338   move_succs (&(jump_bb_next->succs), jump_bb);
4339   move_succs (&t, jump_bb_next);
4340 
4341   if (current_sched_info->fix_recovery_cfg)
4342     current_sched_info->fix_recovery_cfg
4343       (bb->index, jump_bb->index, jump_bb_next->index);
4344 }
4345 
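/* The successor rotation performed above, sketched with T being bb's
   saved successor vector:

       bb            gets jump_bb's successors,
       jump_bb       gets jump_bb_next's successors,
       jump_bb_next  gets T,

   which matches the layout change: jump_bb_next has been relinked to
   sit right after bb, in front of jump_bb.  */
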
4346 /* Helper function for move_block_after_check.
4347    This function attaches the edge vector pointed to by SUCCSP to
4348    block TO.  */
4349 static void
4350 move_succs (VEC(edge,gc) **succsp, basic_block to)
4351 {
4352   edge e;
4353   edge_iterator ei;
4354 
4355   gcc_assert (to->succs == 0);
4356 
4357   to->succs = *succsp;
4358 
4359   FOR_EACH_EDGE (e, ei, to->succs)
4360     e->src = to;
4361 
4362   *succsp = 0;
4363 }
4364 
4365 /* Initialize GLAT (global_live_at_{start, end}) structures.
4366    GLAT structures are used to substitute for global_live_at_{start, end}
4367    regsets during scheduling.  This is necessary to use such functions as
4368    split_block (), as they assume consistency of register live information.  */
4369 static void
4370 init_glat (void)
4371 {
4372   basic_block bb;
4373 
4374   FOR_ALL_BB (bb)
4375     init_glat1 (bb);
4376 }
4377 
4378 /* Helper function for init_glat.  */
4379 static void
4380 init_glat1 (basic_block bb)
4381 {
4382   gcc_assert (bb->il.rtl->global_live_at_start != 0
4383 	      && bb->il.rtl->global_live_at_end != 0);
4384 
4385   glat_start[bb->index] = bb->il.rtl->global_live_at_start;
4386   glat_end[bb->index] = bb->il.rtl->global_live_at_end;
4387 
4388   if (current_sched_info->flags & DETACH_LIFE_INFO)
4389     {
4390       bb->il.rtl->global_live_at_start = 0;
4391       bb->il.rtl->global_live_at_end = 0;
4392     }
4393 }
4394 
4395 /* Attach reg_live_info back to basic blocks.
4396    Also save regsets that should not have been changed during scheduling,
4397    for checking purposes (see check_reg_live).  */
4398 void
4399 attach_life_info (void)
4400 {
4401   basic_block bb;
4402 
4403   FOR_ALL_BB (bb)
4404     attach_life_info1 (bb);
4405 }
4406 
4407 /* Helper function for attach_life_info.  */
4408 static void
4409 attach_life_info1 (basic_block bb)
4410 {
4411   gcc_assert (bb->il.rtl->global_live_at_start == 0
4412 	      && bb->il.rtl->global_live_at_end == 0);
4413 
4414   if (glat_start[bb->index])
4415     {
4416       gcc_assert (glat_end[bb->index]);
4417 
4418       bb->il.rtl->global_live_at_start = glat_start[bb->index];
4419       bb->il.rtl->global_live_at_end = glat_end[bb->index];
4420 
4421       /* Make them NULL, so they won't be freed in free_glat.  */
4422       glat_start[bb->index] = 0;
4423       glat_end[bb->index] = 0;
4424 
4425 #ifdef ENABLE_CHECKING
4426       if (bb->index < NUM_FIXED_BLOCKS
4427 	  || current_sched_info->region_head_or_leaf_p (bb, 0))
4428 	{
4429 	  glat_start[bb->index] = ALLOC_REG_SET (&reg_obstack);
4430 	  COPY_REG_SET (glat_start[bb->index],
4431 			bb->il.rtl->global_live_at_start);
4432 	}
4433 
4434       if (bb->index < NUM_FIXED_BLOCKS
4435 	  || current_sched_info->region_head_or_leaf_p (bb, 1))
4436 	{
4437 	  glat_end[bb->index] = ALLOC_REG_SET (&reg_obstack);
4438 	  COPY_REG_SET (glat_end[bb->index], bb->il.rtl->global_live_at_end);
4439 	}
4440 #endif
4441     }
4442   else
4443     {
4444       gcc_assert (!glat_end[bb->index]);
4445 
4446       bb->il.rtl->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
4447       bb->il.rtl->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
4448     }
4449 }
4450 
4451 /* Free GLAT information.  */
4452 static void
4453 free_glat (void)
4454 {
4455 #ifdef ENABLE_CHECKING
4456   if (current_sched_info->flags & DETACH_LIFE_INFO)
4457     {
4458       basic_block bb;
4459 
4460       FOR_ALL_BB (bb)
4461 	{
4462 	  if (glat_start[bb->index])
4463 	    FREE_REG_SET (glat_start[bb->index]);
4464 	  if (glat_end[bb->index])
4465 	    FREE_REG_SET (glat_end[bb->index]);
4466 	}
4467     }
4468 #endif
4469 
4470   free (glat_start);
4471   free (glat_end);
4472 }
4473 
4474 /* Remove INSN from the instruction stream.
4475    INSN should not have any dependencies.  */
4476 static void
4477 sched_remove_insn (rtx insn)
4478 {
4479   change_queue_index (insn, QUEUE_NOWHERE);
4480   current_sched_info->add_remove_insn (insn, 1);
4481   remove_insn (insn);
4482 }
4483 
4484 /* Clear priorities of all instructions that are
4485    forward dependent on INSN.  */
4486 static void
4487 clear_priorities (rtx insn)
4488 {
4489   rtx link;
4490 
4491   for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
4492     {
4493       rtx pro;
4494 
4495       pro = XEXP (link, 0);
4496       if (INSN_PRIORITY_KNOWN (pro))
4497 	{
4498 	  INSN_PRIORITY_KNOWN (pro) = 0;
4499 	  clear_priorities (pro);
4500 	}
4501     }
4502 }
4503 
4504 /* Recompute priorities of instructions whose priorities might have been
4505    changed due to changes in INSN.  */
4506 static void
4507 calc_priorities (rtx insn)
4508 {
4509   rtx link;
4510 
4511   for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
4512     {
4513       rtx pro;
4514 
4515       pro = XEXP (link, 0);
4516       if (!INSN_PRIORITY_KNOWN (pro))
4517 	{
4518 	  priority (pro);
4519 	  calc_priorities (pro);
4520 	}
4521     }
4522 }
4523 
4524 
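/* clear_priorities and calc_priorities are used as a pair: when the
   dependence lists of an insn have changed, priorities upstream of it
   are first invalidated and then recomputed, as in
   create_check_block_twin above:

       clear_priorities (twin);
       calc_priorities (twin);  */
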
4525 /* Add dependences between JUMP and other instructions in the recovery
4526    block.  INSN is the first insn in the recovery block.  */
4527 static void
4528 add_jump_dependencies (rtx insn, rtx jump)
4529 {
4530   do
4531     {
4532       insn = NEXT_INSN (insn);
4533       if (insn == jump)
4534 	break;
4535 
4536       if (!INSN_DEPEND (insn))
4537 	add_back_forw_dep (jump, insn, REG_DEP_ANTI, DEP_ANTI);
4538     }
4539   while (1);
4540   gcc_assert (LOG_LINKS (jump));
4541 }
4542 
4543 /* Return the NOTE_INSN_BASIC_BLOCK of BB.  */
4544 rtx
4545 bb_note (basic_block bb)
4546 {
4547   rtx note;
4548 
4549   note = BB_HEAD (bb);
4550   if (LABEL_P (note))
4551     note = NEXT_INSN (note);
4552 
4553   gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4554   return note;
4555 }
4556 
4557 #ifdef ENABLE_CHECKING
4558 extern void debug_spec_status (ds_t);
4559 
4560 /* Dump information about the dependence status S.  */
4561 void
4562 debug_spec_status (ds_t s)
4563 {
4564   FILE *f = stderr;
4565 
4566   if (s & BEGIN_DATA)
4567     fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak (s, BEGIN_DATA));
4568   if (s & BE_IN_DATA)
4569     fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak (s, BE_IN_DATA));
4570   if (s & BEGIN_CONTROL)
4571     fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak (s, BEGIN_CONTROL));
4572   if (s & BE_IN_CONTROL)
4573     fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak (s, BE_IN_CONTROL));
4574 
4575   if (s & HARD_DEP)
4576     fprintf (f, "HARD_DEP; ");
4577 
4578   if (s & DEP_TRUE)
4579     fprintf (f, "DEP_TRUE; ");
4580   if (s & DEP_ANTI)
4581     fprintf (f, "DEP_ANTI; ");
4582   if (s & DEP_OUTPUT)
4583     fprintf (f, "DEP_OUTPUT; ");
4584 
4585   fprintf (f, "\n");
4586 }
4587 
4588 /* Helper function for check_cfg.
4589    Return nonzero if the edge vector pointed to by EL has an edge with TYPE in
4590    its flags.  */
4591 static int
4592 has_edge_p (VEC(edge,gc) *el, int type)
4593 {
4594   edge e;
4595   edge_iterator ei;
4596 
4597   FOR_EACH_EDGE (e, ei, el)
4598     if (e->flags & type)
4599       return 1;
4600   return 0;
4601 }
4602 
4603 /* Check a few properties of the CFG between HEAD and TAIL.
4604    If HEAD (TAIL) is NULL, check from the beginning (till the end) of the
4605    instruction stream.  */
4606 static void
4607 check_cfg (rtx head, rtx tail)
4608 {
4609   rtx next_tail;
4610   basic_block bb = 0;
4611   int not_first = 0, not_last;
4612 
4613   if (head == NULL)
4614     head = get_insns ();
4615   if (tail == NULL)
4616     tail = get_last_insn ();
4617   next_tail = NEXT_INSN (tail);
4618 
4619   do
4620     {
4621       not_last = head != tail;
4622 
4623       if (not_first)
4624 	gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
4625       if (not_last)
4626 	gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
4627 
4628       if (LABEL_P (head)
4629 	  || (NOTE_INSN_BASIC_BLOCK_P (head)
4630 	      && (!not_first
4631 		  || (not_first && !LABEL_P (PREV_INSN (head))))))
4632 	{
4633 	  gcc_assert (bb == 0);
4634 	  bb = BLOCK_FOR_INSN (head);
4635 	  if (bb != 0)
4636 	    gcc_assert (BB_HEAD (bb) == head);
4637 	  else
4638 	    /* This is the case of jump table.  See inside_basic_block_p ().  */
4639 	    gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
4640 	}
4641 
4642       if (bb == 0)
4643 	{
4644 	  gcc_assert (!inside_basic_block_p (head));
4645 	  head = NEXT_INSN (head);
4646 	}
4647       else
4648 	{
4649 	  gcc_assert (inside_basic_block_p (head)
4650 		      || NOTE_P (head));
4651 	  gcc_assert (BLOCK_FOR_INSN (head) == bb);
4652 
4653 	  if (LABEL_P (head))
4654 	    {
4655 	      head = NEXT_INSN (head);
4656 	      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
4657 	    }
4658 	  else
4659 	    {
4660 	      if (control_flow_insn_p (head))
4661 		{
4662 		  gcc_assert (BB_END (bb) == head);
4663 
4664 		  if (any_uncondjump_p (head))
4665 		    gcc_assert (EDGE_COUNT (bb->succs) == 1
4666 				&& BARRIER_P (NEXT_INSN (head)));
4667 		  else if (any_condjump_p (head))
4668 		    gcc_assert (/* Usual case.  */
4669                                 (EDGE_COUNT (bb->succs) > 1
4670                                  && !BARRIER_P (NEXT_INSN (head)))
4671                                 /* Or jump to the next instruction.  */
4672                                 || (EDGE_COUNT (bb->succs) == 1
4673                                     && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
4674                                         == JUMP_LABEL (head))));
4675 		}
4676 	      if (BB_END (bb) == head)
4677 		{
4678 		  if (EDGE_COUNT (bb->succs) > 1)
4679 		    gcc_assert (control_flow_insn_p (head)
4680 				|| has_edge_p (bb->succs, EDGE_COMPLEX));
4681 		  bb = 0;
4682 		}
4683 
4684 	      head = NEXT_INSN (head);
4685 	    }
4686 	}
4687 
4688       not_first = 1;
4689     }
4690   while (head != next_tail);
4691 
4692   gcc_assert (bb == 0);
4693 }
4694 
4695 /* Perform a few consistency checks of flags in different data structures.  */
4696 static void
4697 check_sched_flags (void)
4698 {
4699   unsigned int f = current_sched_info->flags;
4700 
4701   if (flag_sched_stalled_insns)
4702     gcc_assert (!(f & DO_SPECULATION));
4703   if (f & DO_SPECULATION)
4704     gcc_assert (!flag_sched_stalled_insns
4705 		&& (f & DETACH_LIFE_INFO)
4706 		&& spec_info
4707 		&& spec_info->mask);
4708   if (f & DETACH_LIFE_INFO)
4709     gcc_assert (f & USE_GLAT);
4710 }
4711 
4712 /* Check global_live_at_{start, end} regsets.
4713    If FATAL_P is TRUE, then abort execution at the first failure.
4714    Otherwise, print diagnostics to STDERR (this mode is for calling
4715    from debugger).  */
4716 void
4717 check_reg_live (bool fatal_p)
4718 {
4719   basic_block bb;
4720 
4721   FOR_ALL_BB (bb)
4722     {
4723       int i;
4724 
4725       i = bb->index;
4726 
4727       if (glat_start[i])
4728 	{
4729 	  bool b = bitmap_equal_p (bb->il.rtl->global_live_at_start,
4730 				   glat_start[i]);
4731 
4732 	  if (!b)
4733 	    {
4734 	      gcc_assert (!fatal_p);
4735 
4736 	      fprintf (stderr, ";; check_reg_live_at_start (%d) failed.\n", i);
4737 	    }
4738 	}
4739 
4740       if (glat_end[i])
4741 	{
4742 	  bool b = bitmap_equal_p (bb->il.rtl->global_live_at_end,
4743 				   glat_end[i]);
4744 
4745 	  if (!b)
4746 	    {
4747 	      gcc_assert (!fatal_p);
4748 
4749 	      fprintf (stderr, ";; check_reg_live_at_end (%d) failed.\n", i);
4750 	    }
4751 	}
4752     }
4753 }
4754 #endif /* ENABLE_CHECKING */
4755 
4756 #endif /* INSN_SCHEDULING */
4757