xref: /386bsd/usr/src/usr.bin/gcc/cc1/sched.c (revision a2142627)
1 /* Instruction scheduling pass.
2    Copyright (C) 1992 Free Software Foundation, Inc.
3    Contributed by Michael Tiemann (tiemann@cygnus.com)
4    Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com)
5 
6 This file is part of GNU CC.
7 
8 GNU CC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12 
13 GNU CC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 GNU General Public License for more details.
17 
18 You should have received a copy of the GNU General Public License
19 along with GNU CC; see the file COPYING.  If not, write to
20 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
21 
22 /* Instruction scheduling pass.
23 
24    This pass implements list scheduling within basic blocks.  It is
25    run after flow analysis, but before register allocation.  The
26    scheduler works as follows:
27 
28    We compute insn priorities based on data dependencies.  Flow
29    analysis only creates a fraction of the data-dependencies we must
30    observe: namely, only those dependencies which the combiner can be
31    expected to use.  For this pass, we must therefore create the
32    remaining dependencies we need to observe: register dependencies,
33    memory dependencies, dependencies to keep function calls in order,
34    and the dependence between a conditional branch and the setting of
35    condition codes are all dealt with here.
36 
37    The scheduler first traverses the data flow graph, starting with
38    the last instruction, and proceeding to the first, assigning
39    values to insn_priority as it goes.  This sorts the instructions
40    topologically by data dependence.
41 
42    Once priorities have been established, we order the insns using
43    list scheduling.  This works as follows: starting with a list of
44    all the ready insns, and sorted according to priority number, we
45    schedule the insn from the end of the list by placing its
46    predecessors in the list according to their priority order.  We
47    consider this insn scheduled by setting the pointer to the "end" of
48    the list to point to the previous insn.  When an insn has no
49    predecessors, we either queue it until sufficient time has elapsed
50    or add it to the ready list.  As the instructions are scheduled or
51    when stalls are introduced, the queue advances and dumps insns into
52    the ready list.  When all insns down to the lowest priority have
53    been scheduled, the critical path of the basic block has been made
54    as short as possible.  The remaining insns are then scheduled in
55    remaining slots.
56 
57    Function unit conflicts are resolved during reverse list scheduling
58    by tracking the time when each insn is committed to the schedule
59    and from that, the time the function units it uses must be free.
60    As insns on the ready list are considered for scheduling, those
61    that would result in a blockage of the already committed insns are
62    queued until no blockage will result.  Among the remaining insns on
63    the ready list to be considered, the first one with the largest
64    potential for causing a subsequent blockage is chosen.
65 
66    The following list shows the order in which we want to break ties
67    among insns in the ready list:
68 
69 	1.  choose insn with lowest conflict cost, ties broken by
70 	2.  choose insn with the longest path to end of bb, ties broken by
71 	3.  choose insn that kills the most registers, ties broken by
72 	4.  choose insn that conflicts with the most ready insns, or finally
73 	5.  choose insn with lowest UID.
74 
75    Memory references complicate matters.  Only if we can be certain
76    that memory references are not part of the data dependency graph
77    (via true, anti, or output dependence), can we move operations past
78    memory references.  To first approximation, reads can be done
79    independently, while writes introduce dependencies.  Better
80    approximations will yield fewer dependencies.
81 
82    Dependencies set up by memory references are treated in exactly the
83    same way as other dependencies, by using LOG_LINKS.
84 
85    Having optimized the critical path, we may have also unduly
86    extended the lifetimes of some registers.  If an operation requires
87    that constants be loaded into registers, it is certainly desirable
88    to load those constants as early as necessary, but no earlier.
89    I.e., it will not do to load up a bunch of registers at the
90    beginning of a basic block only to use them at the end, if they
91    could be loaded later, since this may result in excessive register
92    utilization.
93 
94    Note that since branches are never in basic blocks, but only end
95    basic blocks, this pass will not do any branch scheduling.  But
96    that is ok, since we can use GNU's delayed branch scheduling
97    pass to take care of this case.
98 
99    Also note that no further optimizations based on algebraic identities
100    are performed, so this pass would be a good one to perform instruction
101    splitting, such as breaking up a multiply instruction into shifts
102    and adds where that is profitable.
103 
104    Given the memory aliasing analysis that this pass should perform,
105    it should be possible to remove redundant stores to memory, and to
106    load values from registers instead of hitting memory.
107 
108    This pass must update information that subsequent passes expect to be
109    correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
110    reg_n_calls_crossed, and reg_live_length.  Also, basic_block_head,
111    basic_block_end.
112 
113    The information in the line number notes is carefully retained by this
114    pass.  All other NOTE insns are grouped in their same relative order at
115    the beginning of basic blocks that have been scheduled.  */
116 
117 #include <stdio.h>
118 #include "config.h"
119 #include "rtl.h"
120 #include "basic-block.h"
121 #include "regs.h"
122 #include "hard-reg-set.h"
123 #include "flags.h"
124 #include "insn-config.h"
125 #include "insn-attr.h"
126 
127 #ifdef INSN_SCHEDULING
128 /* Arrays set up by scheduling for the same respective purposes as
129    similar-named arrays set up by flow analysis.  We work with these
130    arrays during the scheduling pass so we can compare values against
131    unscheduled code.
132 
133    Values of these arrays are copied at the end of this pass into the
134    arrays set up by flow analysis.  */
135 static short *sched_reg_n_deaths;
136 static int *sched_reg_n_calls_crossed;
137 static int *sched_reg_live_length;
138 
139 /* Element N is the next insn that sets (hard or pseudo) register
140    N within the current basic block; or zero, if there is no
141    such insn.  Needed for new registers which may be introduced
142    by splitting insns.  */
143 static rtx *reg_last_uses;
144 static rtx *reg_last_sets;
145 
146 /* Vector indexed by INSN_UID giving the original ordering of the insns.  */
147 static int *insn_luid;
148 #define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])
149 
150 /* Vector indexed by INSN_UID giving each instruction a priority.  */
151 static int *insn_priority;
152 #define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])
153 
154 static short *insn_costs;
155 #define INSN_COST(INSN)	insn_costs[INSN_UID (INSN)]
156 
157 /* Vector indexed by INSN_UID giving an encoding of the function units
158    used.  */
159 static short *insn_units;
160 #define INSN_UNIT(INSN)	insn_units[INSN_UID (INSN)]
161 
162 /* Vector indexed by INSN_UID giving an encoding of the blockage range
163    function.  The unit and the range are encoded.  */
164 static unsigned int *insn_blockage;
165 #define INSN_BLOCKAGE(INSN) insn_blockage[INSN_UID (INSN)]
166 #define UNIT_BITS 5
167 #define BLOCKAGE_MASK ((1 << BLOCKAGE_BITS) - 1)
168 #define ENCODE_BLOCKAGE(U,R)				\
169   ((((U) << UNIT_BITS) << BLOCKAGE_BITS			\
170     | MIN_BLOCKAGE_COST (R)) << BLOCKAGE_BITS		\
171    | MAX_BLOCKAGE_COST (R))
172 #define UNIT_BLOCKED(B) ((B) >> (2 * BLOCKAGE_BITS))
173 #define BLOCKAGE_RANGE(B) \
174   (((((B) >> BLOCKAGE_BITS) & BLOCKAGE_MASK) << (HOST_BITS_PER_INT / 2)) \
175    | (B) & BLOCKAGE_MASK)
176 
177 /* Encodings of the `<name>_unit_blockage_range' function.  */
178 #define MIN_BLOCKAGE_COST(R) ((R) >> (HOST_BITS_PER_INT / 2))
179 #define MAX_BLOCKAGE_COST(R) ((R) & ((1 << (HOST_BITS_PER_INT / 2)) - 1))
180 
181 #define DONE_PRIORITY	-1
182 #define MAX_PRIORITY	0x7fffffff
183 #define TAIL_PRIORITY	0x7ffffffe
184 #define LAUNCH_PRIORITY	0x7f000001
185 #define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
186 #define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)
187 
188 /* Vector indexed by INSN_UID giving number of insns referring to this insn.  */
189 static int *insn_ref_count;
190 #define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])
191 
192 /* Vector indexed by INSN_UID giving line-number note in effect for each
193    insn.  For line-number notes, this indicates whether the note may be
194    reused.  */
195 static rtx *line_note;
196 #define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])
197 
198 /* Vector indexed by basic block number giving the starting line-number
199    for each basic block.  */
200 static rtx *line_note_head;
201 
202 /* List of important notes we must keep around.  This is a pointer to the
203    last element in the list.  */
204 static rtx note_list;
205 
206 /* Regsets telling whether a given register is live or dead before the last
207    scheduled insn.  Must scan the instructions once before scheduling to
208    determine what registers are live or dead at the end of the block.  */
209 static regset bb_dead_regs;
210 static regset bb_live_regs;
211 
212 /* Regset telling whether a given register is live after the insn currently
213    being scheduled.  Before processing an insn, this is equal to bb_live_regs
214    above.  This is used so that we can find registers that are newly born/dead
215    after processing an insn.  */
216 static regset old_live_regs;
217 
218 /* The chain of REG_DEAD notes.  REG_DEAD notes are removed from all insns
219    during the initial scan and reused later.  If there are not exactly as
220    many REG_DEAD notes in the post scheduled code as there were in the
221    prescheduled code then we trigger an abort because this indicates a bug.  */
222 static rtx dead_notes;
223 
224 /* Queues, etc.  */
225 
226 /* An instruction is ready to be scheduled when all insns following it
227    have already been scheduled.  It is important to ensure that all
228    insns which use its result will not be executed until its result
229    has been computed.  An insn is maintained in one of four structures:
230 
231    (P) the "Pending" set of insns which cannot be scheduled until
232    their dependencies have been satisfied.
233    (Q) the "Queued" set of insns that can be scheduled when sufficient
234    time has passed.
235    (R) the "Ready" list of unscheduled, uncommitted insns.
236    (S) the "Scheduled" list of insns.
237 
238    Initially, all insns are either "Pending" or "Ready" depending on
239    whether their dependencies are satisfied.
240 
241    Insns move from the "Ready" list to the "Scheduled" list as they
242    are committed to the schedule.  As this occurs, the insns in the
243    "Pending" list have their dependencies satisfied and move to either
244    the "Ready" list or the "Queued" set depending on whether
245    sufficient time has passed to make them ready.  As time passes,
246    insns move from the "Queued" set to the "Ready" list.  Insns may
247    move from the "Ready" list to the "Queued" set if they are blocked
248    due to a function unit conflict.
249 
250    The "Pending" list (P) are the insns in the LOG_LINKS of the unscheduled
251    insns, i.e., those that are ready, queued, and pending.
252    The "Queued" set (Q) is implemented by the variable `insn_queue'.
253    The "Ready" list (R) is implemented by the variables `ready' and
254    `n_ready'.
255    The "Scheduled" list (S) is the new insn chain built by this pass.
256 
257    The transition (R->S) is implemented in the scheduling loop in
258    `schedule_block' when the best insn to schedule is chosen.
259    The transition (R->Q) is implemented in `schedule_select' when an
260    insn is found to to have a function unit conflict with the already
261    committed insns.
262    The transitions (P->R and P->Q) are implemented in `schedule_insn' as
263    insns move from the ready list to the scheduled list.
264    The transition (Q->R) is implemented at the top of the scheduling
265    loop in `schedule_block' as time passes or stalls are introduced.  */
266 
267 /* Implement a circular buffer to delay instructions until sufficient
268    time has passed.  INSN_QUEUE_SIZE is a power of two larger than
269    MAX_BLOCKAGE and MAX_READY_COST computed by genattr.c.  This is the
270    longest time an isnsn may be queued.  */
271 static rtx insn_queue[INSN_QUEUE_SIZE];
272 static int q_ptr = 0;
273 static int q_size = 0;
274 #define NEXT_Q(X) (((X)+1) & (INSN_QUEUE_SIZE-1))
275 #define NEXT_Q_AFTER(X,C) (((X)+C) & (INSN_QUEUE_SIZE-1))
276 
277 /* Vector indexed by INSN_UID giving the minimum clock tick at which
278    the insn becomes ready.  This is used to note timing constraints for
279    insns in the pending list.  */
280 static int *insn_tick;
281 #define INSN_TICK(INSN) (insn_tick[INSN_UID (INSN)])
282 
283 /* Forward declarations.  */
284 static void sched_analyze_2 ();
285 static void schedule_block ();
286 
287 /* Main entry point of this file.  */
288 void schedule_insns ();
289 #endif /* INSN_SCHEDULING */
290 
291 #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
292 
293 /* Vector indexed by N giving the initial (unchanging) value known
294    for pseudo-register N.  */
295 static rtx *reg_known_value;
296 
297 /* Vector recording for each reg_known_value whether it is due to a
298    REG_EQUIV note.  Future passes (viz., reload) may replace the
299    pseudo with the equivalent expression and so we account for the
300    dependences that would be introduced if that happens. */
301 /* ??? This is a problem only on the Convex.  The REG_EQUIV notes created in
302    assign_parms mention the arg pointer, and there are explicit insns in the
303    RTL that modify the arg pointer.  Thus we must ensure that such insns don't
304    get scheduled across each other because that would invalidate the REG_EQUIV
305    notes.  One could argue that the REG_EQUIV notes are wrong, but solving
306    the problem in the scheduler will likely give better code, so we do it
307    here.  */
308 static char *reg_known_equiv_p;
309 
310 /* Indicates number of valid entries in reg_known_value.  */
311 static int reg_known_value_size;
312 
313 static rtx
canon_rtx(x)314 canon_rtx (x)
315      rtx x;
316 {
317   if (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER
318       && REGNO (x) <= reg_known_value_size)
319     return reg_known_value[REGNO (x)];
320   else if (GET_CODE (x) == PLUS)
321     {
322       rtx x0 = canon_rtx (XEXP (x, 0));
323       rtx x1 = canon_rtx (XEXP (x, 1));
324 
325       if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
326 	{
327 	  /* We can tolerate LO_SUMs being offset here; these
328 	     rtl are used for nothing other than comparisons.  */
329 	  if (GET_CODE (x0) == CONST_INT)
330 	    return plus_constant_for_output (x1, INTVAL (x0));
331 	  else if (GET_CODE (x1) == CONST_INT)
332 	    return plus_constant_for_output (x0, INTVAL (x1));
333 	  return gen_rtx (PLUS, GET_MODE (x), x0, x1);
334 	}
335     }
336   return x;
337 }
338 
339 /* Set up all info needed to perform alias analysis on memory references.  */
340 
341 void
init_alias_analysis()342 init_alias_analysis ()
343 {
344   int maxreg = max_reg_num ();
345   rtx insn;
346   rtx note;
347   rtx set;
348 
349   reg_known_value_size = maxreg;
350 
351   reg_known_value
352     = (rtx *) oballoc ((maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx))
353       - FIRST_PSEUDO_REGISTER;
354   bzero (reg_known_value+FIRST_PSEUDO_REGISTER,
355 	 (maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx));
356 
357   reg_known_equiv_p
358     = (char *) oballoc ((maxreg-FIRST_PSEUDO_REGISTER) * sizeof (char))
359       - FIRST_PSEUDO_REGISTER;
360   bzero (reg_known_equiv_p+FIRST_PSEUDO_REGISTER,
361 	 (maxreg-FIRST_PSEUDO_REGISTER) * sizeof (char));
362 
363   /* Fill in the entries with known constant values.  */
364   for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
365     if ((set = single_set (insn)) != 0
366 	&& GET_CODE (SET_DEST (set)) == REG
367 	&& REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
368 	&& (((note = find_reg_note (insn, REG_EQUAL, 0)) != 0
369 	     && reg_n_sets[REGNO (SET_DEST (set))] == 1)
370 	    || (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != 0)
371 	&& GET_CODE (XEXP (note, 0)) != EXPR_LIST)
372       {
373 	int regno = REGNO (SET_DEST (set));
374 	reg_known_value[regno] = XEXP (note, 0);
375 	reg_known_equiv_p[regno] = REG_NOTE_KIND (note) == REG_EQUIV;
376       }
377 
378   /* Fill in the remaining entries.  */
379   while (--maxreg >= FIRST_PSEUDO_REGISTER)
380     if (reg_known_value[maxreg] == 0)
381       reg_known_value[maxreg] = regno_reg_rtx[maxreg];
382 }
383 
384 /* Return 1 if X and Y are identical-looking rtx's.
385 
386    We use the data in reg_known_value above to see if two registers with
387    different numbers are, in fact, equivalent.  */
388 
389 static int
rtx_equal_for_memref_p(x,y)390 rtx_equal_for_memref_p (x, y)
391      rtx x, y;
392 {
393   register int i;
394   register int j;
395   register enum rtx_code code;
396   register char *fmt;
397 
398   if (x == 0 && y == 0)
399     return 1;
400   if (x == 0 || y == 0)
401     return 0;
402   x = canon_rtx (x);
403   y = canon_rtx (y);
404 
405   if (x == y)
406     return 1;
407 
408   code = GET_CODE (x);
409   /* Rtx's of different codes cannot be equal.  */
410   if (code != GET_CODE (y))
411     return 0;
412 
413   /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
414      (REG:SI x) and (REG:HI x) are NOT equivalent.  */
415 
416   if (GET_MODE (x) != GET_MODE (y))
417     return 0;
418 
419   /* REG, LABEL_REF, and SYMBOL_REF can be compared nonrecursively.  */
420 
421   if (code == REG)
422     return REGNO (x) == REGNO (y);
423   if (code == LABEL_REF)
424     return XEXP (x, 0) == XEXP (y, 0);
425   if (code == SYMBOL_REF)
426     return XSTR (x, 0) == XSTR (y, 0);
427 
428   /* Compare the elements.  If any pair of corresponding elements
429      fail to match, return 0 for the whole things.  */
430 
431   fmt = GET_RTX_FORMAT (code);
432   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
433     {
434       switch (fmt[i])
435 	{
436 	case 'w':
437 	  if (XWINT (x, i) != XWINT (y, i))
438 	    return 0;
439 	  break;
440 
441 	case 'n':
442 	case 'i':
443 	  if (XINT (x, i) != XINT (y, i))
444 	    return 0;
445 	  break;
446 
447 	case 'V':
448 	case 'E':
449 	  /* Two vectors must have the same length.  */
450 	  if (XVECLEN (x, i) != XVECLEN (y, i))
451 	    return 0;
452 
453 	  /* And the corresponding elements must match.  */
454 	  for (j = 0; j < XVECLEN (x, i); j++)
455 	    if (rtx_equal_for_memref_p (XVECEXP (x, i, j), XVECEXP (y, i, j)) == 0)
456 	      return 0;
457 	  break;
458 
459 	case 'e':
460 	  if (rtx_equal_for_memref_p (XEXP (x, i), XEXP (y, i)) == 0)
461 	    return 0;
462 	  break;
463 
464 	case 'S':
465 	case 's':
466 	  if (strcmp (XSTR (x, i), XSTR (y, i)))
467 	    return 0;
468 	  break;
469 
470 	case 'u':
471 	  /* These are just backpointers, so they don't matter.  */
472 	  break;
473 
474 	case '0':
475 	  break;
476 
477 	  /* It is believed that rtx's at this level will never
478 	     contain anything but integers and other rtx's,
479 	     except for within LABEL_REFs and SYMBOL_REFs.  */
480 	default:
481 	  abort ();
482 	}
483     }
484   return 1;
485 }
486 
487 /* Given an rtx X, find a SYMBOL_REF or LABEL_REF within
488    X and return it, or return 0 if none found.  */
489 
490 static rtx
find_symbolic_term(x)491 find_symbolic_term (x)
492      rtx x;
493 {
494   register int i;
495   register enum rtx_code code;
496   register char *fmt;
497 
498   code = GET_CODE (x);
499   if (code == SYMBOL_REF || code == LABEL_REF)
500     return x;
501   if (GET_RTX_CLASS (code) == 'o')
502     return 0;
503 
504   fmt = GET_RTX_FORMAT (code);
505   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
506     {
507       rtx t;
508 
509       if (fmt[i] == 'e')
510 	{
511 	  t = find_symbolic_term (XEXP (x, i));
512 	  if (t != 0)
513 	    return t;
514 	}
515       else if (fmt[i] == 'E')
516 	break;
517     }
518   return 0;
519 }
520 
521 /* Return nonzero if X and Y (memory addresses) could reference the
522    same location in memory.  C is an offset accumulator.  When
523    C is nonzero, we are testing aliases between X and Y + C.
524    XSIZE is the size in bytes of the X reference,
525    similarly YSIZE is the size in bytes for Y.
526 
527    If XSIZE or YSIZE is zero, we do not know the amount of memory being
528    referenced (the reference was BLKmode), so make the most pessimistic
529    assumptions.
530 
531    We recognize the following cases of non-conflicting memory:
532 
533 	(1) addresses involving the frame pointer cannot conflict
534 	    with addresses involving static variables.
535 	(2) static variables with different addresses cannot conflict.
536 
537    Nice to notice that varying addresses cannot conflict with fp if no
538    local variables had their addresses taken, but that's too hard now.  */
539 
540 /* ??? In Fortran, references to a array parameter can never conflict with
541    another array parameter.  */
542 
543 static int
memrefs_conflict_p(xsize,x,ysize,y,c)544 memrefs_conflict_p (xsize, x, ysize, y, c)
545      rtx x, y;
546      int xsize, ysize;
547      HOST_WIDE_INT c;
548 {
549   if (GET_CODE (x) == HIGH)
550     x = XEXP (x, 0);
551   else if (GET_CODE (x) == LO_SUM)
552     x = XEXP (x, 1);
553   else
554     x = canon_rtx (x);
555   if (GET_CODE (y) == HIGH)
556     y = XEXP (y, 0);
557   else if (GET_CODE (y) == LO_SUM)
558     y = XEXP (y, 1);
559   else
560     y = canon_rtx (y);
561 
562   if (rtx_equal_for_memref_p (x, y))
563     return (xsize == 0 || ysize == 0 ||
564 	    (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
565 
566   if (y == frame_pointer_rtx || y == stack_pointer_rtx)
567     {
568       rtx t = y;
569       int tsize = ysize;
570       y = x; ysize = xsize;
571       x = t; xsize = tsize;
572     }
573 
574   if (x == frame_pointer_rtx || x == stack_pointer_rtx)
575     {
576       rtx y1;
577 
578       if (CONSTANT_P (y))
579 	return 0;
580 
581       if (GET_CODE (y) == PLUS
582 	  && canon_rtx (XEXP (y, 0)) == x
583 	  && (y1 = canon_rtx (XEXP (y, 1)))
584 	  && GET_CODE (y1) == CONST_INT)
585 	{
586 	  c += INTVAL (y1);
587 	  return (xsize == 0 || ysize == 0
588 		  || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
589 	}
590 
591       if (GET_CODE (y) == PLUS
592 	  && (y1 = canon_rtx (XEXP (y, 0)))
593 	  && CONSTANT_P (y1))
594 	return 0;
595 
596       return 1;
597     }
598 
599   if (GET_CODE (x) == PLUS)
600     {
601       /* The fact that X is canonicalized means that this
602 	 PLUS rtx is canonicalized.  */
603       rtx x0 = XEXP (x, 0);
604       rtx x1 = XEXP (x, 1);
605 
606       if (GET_CODE (y) == PLUS)
607 	{
608 	  /* The fact that Y is canonicalized means that this
609 	     PLUS rtx is canonicalized.  */
610 	  rtx y0 = XEXP (y, 0);
611 	  rtx y1 = XEXP (y, 1);
612 
613 	  if (rtx_equal_for_memref_p (x1, y1))
614 	    return memrefs_conflict_p (xsize, x0, ysize, y0, c);
615 	  if (rtx_equal_for_memref_p (x0, y0))
616 	    return memrefs_conflict_p (xsize, x1, ysize, y1, c);
617 	  if (GET_CODE (x1) == CONST_INT)
618 	    if (GET_CODE (y1) == CONST_INT)
619 	      return memrefs_conflict_p (xsize, x0, ysize, y0,
620 					 c - INTVAL (x1) + INTVAL (y1));
621 	    else
622 	      return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
623 	  else if (GET_CODE (y1) == CONST_INT)
624 	    return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
625 
626 	  /* Handle case where we cannot understand iteration operators,
627 	     but we notice that the base addresses are distinct objects.  */
628 	  x = find_symbolic_term (x);
629 	  if (x == 0)
630 	    return 1;
631 	  y = find_symbolic_term (y);
632 	  if (y == 0)
633 	    return 1;
634 	  return rtx_equal_for_memref_p (x, y);
635 	}
636       else if (GET_CODE (x1) == CONST_INT)
637 	return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
638     }
639   else if (GET_CODE (y) == PLUS)
640     {
641       /* The fact that Y is canonicalized means that this
642 	 PLUS rtx is canonicalized.  */
643       rtx y0 = XEXP (y, 0);
644       rtx y1 = XEXP (y, 1);
645 
646       if (GET_CODE (y1) == CONST_INT)
647 	return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
648       else
649 	return 1;
650     }
651 
652   if (GET_CODE (x) == GET_CODE (y))
653     switch (GET_CODE (x))
654       {
655       case MULT:
656 	{
657 	  /* Handle cases where we expect the second operands to be the
658 	     same, and check only whether the first operand would conflict
659 	     or not.  */
660 	  rtx x0, y0;
661 	  rtx x1 = canon_rtx (XEXP (x, 1));
662 	  rtx y1 = canon_rtx (XEXP (y, 1));
663 	  if (! rtx_equal_for_memref_p (x1, y1))
664 	    return 1;
665 	  x0 = canon_rtx (XEXP (x, 0));
666 	  y0 = canon_rtx (XEXP (y, 0));
667 	  if (rtx_equal_for_memref_p (x0, y0))
668 	    return (xsize == 0 || ysize == 0
669 		    || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
670 
671 	  /* Can't properly adjust our sizes.  */
672 	  if (GET_CODE (x1) != CONST_INT)
673 	    return 1;
674 	  xsize /= INTVAL (x1);
675 	  ysize /= INTVAL (x1);
676 	  c /= INTVAL (x1);
677 	  return memrefs_conflict_p (xsize, x0, ysize, y0, c);
678 	}
679       }
680 
681   if (CONSTANT_P (x))
682     {
683       if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT)
684 	{
685 	  c += (INTVAL (y) - INTVAL (x));
686 	  return (xsize == 0 || ysize == 0
687 		  || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
688 	}
689 
690       if (GET_CODE (x) == CONST)
691 	{
692 	  if (GET_CODE (y) == CONST)
693 	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
694 				       ysize, canon_rtx (XEXP (y, 0)), c);
695 	  else
696 	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
697 				       ysize, y, c);
698 	}
699       if (GET_CODE (y) == CONST)
700 	return memrefs_conflict_p (xsize, x, ysize,
701 				   canon_rtx (XEXP (y, 0)), c);
702 
703       if (CONSTANT_P (y))
704 	return (rtx_equal_for_memref_p (x, y)
705 		&& (xsize == 0 || ysize == 0
706 		    || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)));
707 
708       return 1;
709     }
710   return 1;
711 }
712 
713 /* Functions to compute memory dependencies.
714 
715    Since we process the insns in execution order, we can build tables
716    to keep track of what registers are fixed (and not aliased), what registers
717    are varying in known ways, and what registers are varying in unknown
718    ways.
719 
720    If both memory references are volatile, then there must always be a
721    dependence between the two references, since their order can not be
722    changed.  A volatile and non-volatile reference can be interchanged
723    though.
724 
725    A MEM_IN_STRUCT reference at a non-QImode varying address can never
726    conflict with a non-MEM_IN_STRUCT reference at a fixed address.   We must
727    allow QImode aliasing because the ANSI C standard allows character
728    pointers to alias anything.  We are assuming that characters are
729    always QImode here.  */
730 
731 /* Read dependence: X is read after read in MEM takes place.  There can
732    only be a dependence here if both reads are volatile.  */
733 
734 int
read_dependence(mem,x)735 read_dependence (mem, x)
736      rtx mem;
737      rtx x;
738 {
739   return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
740 }
741 
742 /* True dependence: X is read after store in MEM takes place.  */
743 
744 int
true_dependence(mem,x)745 true_dependence (mem, x)
746      rtx mem;
747      rtx x;
748 {
749   /* If X is an unchanging read, then it can't possibly conflict with any
750      non-unchanging store.  It may conflict with an unchanging write though,
751      because there may be a single store to this address to initialize it.
752      Just fall through to the code below to resolve the case where we have
753      both an unchanging read and an unchanging write.  This won't handle all
754      cases optimally, but the possible performance loss should be
755      negligible.  */
756   if (RTX_UNCHANGING_P (x) && ! RTX_UNCHANGING_P (mem))
757     return 0;
758 
759   return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
760 	  || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
761 				  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
762 	      && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
763 		    && GET_MODE (mem) != QImode
764 		    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
765 	      && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
766 		    && GET_MODE (x) != QImode
767 		    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
768 }
769 
770 /* Anti dependence: X is written after read in MEM takes place.  */
771 
772 int
anti_dependence(mem,x)773 anti_dependence (mem, x)
774      rtx mem;
775      rtx x;
776 {
777   /* If MEM is an unchanging read, then it can't possibly conflict with
778      the store to X, because there is at most one store to MEM, and it must
779      have occured somewhere before MEM.  */
780   if (RTX_UNCHANGING_P (mem))
781     return 0;
782 
783   return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
784 	  || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
785 				  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
786 	      && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
787 		    && GET_MODE (mem) != QImode
788 		    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
789 	      && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
790 		    && GET_MODE (x) != QImode
791 		    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
792 }
793 
794 /* Output dependence: X is written after store in MEM takes place.  */
795 
796 int
output_dependence(mem,x)797 output_dependence (mem, x)
798      rtx mem;
799      rtx x;
800 {
801   return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
802 	  || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
803 				  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
804 	      && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
805 		    && GET_MODE (mem) != QImode
806 		    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
807 	      && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
808 		    && GET_MODE (x) != QImode
809 		    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
810 }
811 
812 /* Helper functions for instruction scheduling.  */
813 
814 /* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
815    LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
816    of dependence that this link represents.  */
817 
818 void
add_dependence(insn,elem,dep_type)819 add_dependence (insn, elem, dep_type)
820      rtx insn;
821      rtx elem;
822      enum reg_note dep_type;
823 {
824   rtx link, next;
825 
826   /* Don't depend an insn on itself.  */
827   if (insn == elem)
828     return;
829 
830   /* If elem is part of a sequence that must be scheduled together, then
831      make the dependence point to the last insn of the sequence.
832      When HAVE_cc0, it is possible for NOTEs to exist between users and
833      setters of the condition codes, so we must skip past notes here.
834      Otherwise, NOTEs are impossible here.  */
835 
836   next = NEXT_INSN (elem);
837 
838 #ifdef HAVE_cc0
839   while (next && GET_CODE (next) == NOTE)
840     next = NEXT_INSN (next);
841 #endif
842 
843   if (next && SCHED_GROUP_P (next))
844     {
845       /* Notes will never intervene here though, so don't bother checking
846 	 for them.  */
847       /* We must reject CODE_LABELs, so that we don't get confused by one
848 	 that has LABEL_PRESERVE_P set, which is represented by the same
849 	 bit in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
850 	 SCHED_GROUP_P.  */
851       while (NEXT_INSN (next) && SCHED_GROUP_P (NEXT_INSN (next))
852 	     && GET_CODE (NEXT_INSN (next)) != CODE_LABEL)
853 	next = NEXT_INSN (next);
854 
855       /* Again, don't depend an insn on itself.  */
856       if (insn == next)
857 	return;
858 
859       /* Make the dependence to NEXT, the last insn of the group, instead
860 	 of the original ELEM.  */
861       elem = next;
862     }
863 
864   /* Check that we don't already have this dependence.  */
865   for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
866     if (XEXP (link, 0) == elem)
867       {
868 	/* If this is a more restrictive type of dependence than the existing
869 	   one, then change the existing dependence to this type.  */
870 	if ((int) dep_type < (int) REG_NOTE_KIND (link))
871 	  PUT_REG_NOTE_KIND (link, dep_type);
872 	return;
873       }
874   /* Might want to check one level of transitivity to save conses.  */
875 
876   link = rtx_alloc (INSN_LIST);
877   /* Insn dependency, not data dependency.  */
878   PUT_REG_NOTE_KIND (link, dep_type);
879   XEXP (link, 0) = elem;
880   XEXP (link, 1) = LOG_LINKS (insn);
881   LOG_LINKS (insn) = link;
882 }
883 
884 /* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
885    of INSN.  Abort if not found.  */
886 void
remove_dependence(insn,elem)887 remove_dependence (insn, elem)
888      rtx insn;
889      rtx elem;
890 {
891   rtx prev, link;
892   int found = 0;
893 
894   for (prev = 0, link = LOG_LINKS (insn); link;
895        prev = link, link = XEXP (link, 1))
896     {
897       if (XEXP (link, 0) == elem)
898 	{
899 	  if (prev)
900 	    XEXP (prev, 1) = XEXP (link, 1);
901 	  else
902 	    LOG_LINKS (insn) = XEXP (link, 1);
903 	  found = 1;
904 	}
905     }
906 
907   if (! found)
908     abort ();
909   return;
910 }
911 
912 #ifndef INSN_SCHEDULING
schedule_insns()913 void schedule_insns () {}
914 #else
915 #ifndef __GNUC__
916 #define __inline
917 #endif
918 
919 /* Computation of memory dependencies.  */
920 
921 /* The *_insns and *_mems are paired lists.  Each pending memory operation
922    will have a pointer to the MEM rtx on one list and a pointer to the
923    containing insn on the other list in the same place in the list.  */
924 
925 /* We can't use add_dependence like the old code did, because a single insn
926    may have multiple memory accesses, and hence needs to be on the list
927    once for each memory access.  Add_dependence won't let you add an insn
928    to a list more than once.  */
929 
930 /* An INSN_LIST containing all insns with pending read operations.  */
931 static rtx pending_read_insns;
932 
933 /* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
934 static rtx pending_read_mems;
935 
936 /* An INSN_LIST containing all insns with pending write operations.  */
937 static rtx pending_write_insns;
938 
939 /* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
940 static rtx pending_write_mems;
941 
942 /* Indicates the combined length of the two pending lists.  We must prevent
943    these lists from ever growing too large since the number of dependencies
944    produced is at least O(N*N), and execution time is at least O(4*N*N), as
945    a function of the length of these pending lists.  */
946 
947 static int pending_lists_length;
948 
949 /* An INSN_LIST containing all INSN_LISTs allocated but currently unused.  */
950 
951 static rtx unused_insn_list;
952 
953 /* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused.  */
954 
955 static rtx unused_expr_list;
956 
957 /* The last insn upon which all memory references must depend.
958    This is an insn which flushed the pending lists, creating a dependency
959    between it and all previously pending memory references.  This creates
960    a barrier (or a checkpoint) which no memory reference is allowed to cross.
961 
962    This includes all non constant CALL_INSNs.  When we do interprocedural
963    alias analysis, this restriction can be relaxed.
964    This may also be an INSN that writes memory if the pending lists grow
965    too large.  */
966 
967 static rtx last_pending_memory_flush;
968 
969 /* The last function call we have seen.  All hard regs, and, of course,
970    the last function call, must depend on this.  */
971 
972 static rtx last_function_call;
973 
974 /* The LOG_LINKS field of this is a list of insns which use a pseudo register
975    that does not already cross a call.  We create dependencies between each
976    of those insn and the next call insn, to ensure that they won't cross a call
977    after scheduling is done.  */
978 
979 static rtx sched_before_next_call;
980 
981 /* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
982    so that insns independent of the last scheduled insn will be preferred
983    over dependent instructions.  */
984 
985 static rtx last_scheduled_insn;
986 
987 /* Process an insn's memory dependencies.  There are four kinds of
988    dependencies:
989 
990    (0) read dependence: read follows read
991    (1) true dependence: read follows write
992    (2) anti dependence: write follows read
993    (3) output dependence: write follows write
994 
995    We are careful to build only dependencies which actually exist, and
996    use transitivity to avoid building too many links.  */
997 
998 /* Return the INSN_LIST containing INSN in LIST, or NULL
999    if LIST does not contain INSN.  */
1000 
1001 __inline static rtx
find_insn_list(insn,list)1002 find_insn_list (insn, list)
1003      rtx insn;
1004      rtx list;
1005 {
1006   while (list)
1007     {
1008       if (XEXP (list, 0) == insn)
1009 	return list;
1010       list = XEXP (list, 1);
1011     }
1012   return 0;
1013 }
1014 
1015 /* Compute the function units used by INSN.  This caches the value
1016    returned by function_units_used.  A function unit is encoded as the
1017    unit number if the value is non-negative and the compliment of a
1018    mask if the value is negative.  A function unit index is the
1019    non-negative encoding.  */
1020 
1021 __inline static int
insn_unit(insn)1022 insn_unit (insn)
1023      rtx insn;
1024 {
1025   register int unit = INSN_UNIT (insn);
1026 
1027   if (unit == 0)
1028     {
1029       recog_memoized (insn);
1030 
1031       /* A USE insn, or something else we don't need to understand.
1032 	 We can't pass these directly to function_units_used because it will
1033 	 trigger a fatal error for unrecognizable insns.  */
1034       if (INSN_CODE (insn) < 0)
1035 	unit = -1;
1036       else
1037 	{
1038 	  unit = function_units_used (insn);
1039 	  /* Increment non-negative values so we can cache zero.  */
1040 	  if (unit >= 0) unit++;
1041 	}
1042       /* We only cache 16 bits of the result, so if the value is out of
1043 	 range, don't cache it.  */
1044       if (FUNCTION_UNITS_SIZE < HOST_BITS_PER_SHORT
1045 	  || unit >= 0
1046 	  || (~unit & ((1 << (HOST_BITS_PER_SHORT - 1)) - 1)) == 0)
1047       INSN_UNIT (insn) = unit;
1048     }
1049   return (unit > 0 ? unit - 1 : unit);
1050 }
1051 
1052 /* Compute the blockage range for executing INSN on UNIT.  This caches
1053    the value returned by the blockage_range_function for the unit.
1054    These values are encoded in an int where the upper half gives the
1055    minimum value and the lower half gives the maximum value.  */
1056 
1057 __inline static unsigned int
blockage_range(unit,insn)1058 blockage_range (unit, insn)
1059      int unit;
1060      rtx insn;
1061 {
1062   unsigned int blockage = INSN_BLOCKAGE (insn);
1063   unsigned int range;
1064 
1065   if (UNIT_BLOCKED (blockage) != unit + 1)
1066     {
1067       range = function_units[unit].blockage_range_function (insn);
1068       /* We only cache the blockage range for one unit and then only if
1069 	 the values fit.  */
1070       if (HOST_BITS_PER_INT >= UNIT_BITS + 2 * BLOCKAGE_BITS)
1071 	INSN_BLOCKAGE (insn) = ENCODE_BLOCKAGE (unit + 1, range);
1072     }
1073   else
1074     range = BLOCKAGE_RANGE (blockage);
1075 
1076   return range;
1077 }
1078 
1079 /* A vector indexed by function unit instance giving the last insn to use
1080    the unit.  The value of the function unit instance index for unit U
1081    instance I is (U + I * FUNCTION_UNITS_SIZE).  */
1082 static rtx unit_last_insn[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];
1083 
1084 /* A vector indexed by function unit instance giving the minimum time when
1085    the unit will unblock based on the maximum blockage cost.  */
1086 static int unit_tick[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];
1087 
1088 /* A vector indexed by function unit number giving the number of insns
1089    that remain to use the unit.  */
1090 static int unit_n_insns[FUNCTION_UNITS_SIZE];
1091 
1092 /* Reset the function unit state to the null state.  */
1093 
1094 static void
clear_units()1095 clear_units ()
1096 {
1097   int unit;
1098 
1099   bzero (unit_last_insn, sizeof (unit_last_insn));
1100   bzero (unit_tick, sizeof (unit_tick));
1101   bzero (unit_n_insns, sizeof (unit_n_insns));
1102 }
1103 
1104 /* Record an insn as one that will use the units encoded by UNIT.  */
1105 
1106 __inline static void
prepare_unit(unit)1107 prepare_unit (unit)
1108      int unit;
1109 {
1110   int i;
1111 
1112   if (unit >= 0)
1113     unit_n_insns[unit]++;
1114   else
1115     for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
1116       if ((unit & 1) != 0)
1117 	prepare_unit (i);
1118 }
1119 
1120 /* Return the actual hazard cost of executing INSN on the unit UNIT,
1121    instance INSTANCE at time CLOCK if the previous actual hazard cost
1122    was COST.  */
1123 
1124 __inline static int
actual_hazard_this_instance(unit,instance,insn,clock,cost)1125 actual_hazard_this_instance (unit, instance, insn, clock, cost)
1126      int unit, instance, clock, cost;
1127      rtx insn;
1128 {
1129   int i;
1130   int tick = unit_tick[instance];
1131 
1132   if (tick - clock > cost)
1133     {
1134       /* The scheduler is operating in reverse, so INSN is the executing
1135 	 insn and the unit's last insn is the candidate insn.  We want a
1136 	 more exact measure of the blockage if we execute INSN at CLOCK
1137 	 given when we committed the execution of the unit's last insn.
1138 
1139 	 The blockage value is given by either the unit's max blockage
1140 	 constant, blockage range function, or blockage function.  Use
1141 	 the most exact form for the given unit.  */
1142 
1143       if (function_units[unit].blockage_range_function)
1144 	{
1145 	  if (function_units[unit].blockage_function)
1146 	    tick += (function_units[unit].blockage_function
1147 		     (insn, unit_last_insn[instance])
1148 		     - function_units[unit].max_blockage);
1149 	  else
1150 	    tick += ((int) MAX_BLOCKAGE_COST (blockage_range (unit, insn))
1151 		     - function_units[unit].max_blockage);
1152 	}
1153       if (tick - clock > cost)
1154 	cost = tick - clock;
1155     }
1156   return cost;
1157 }
1158 
1159 /* Record INSN as having begun execution on the units encoded by UNIT at
1160    time CLOCK.  */
1161 
1162 __inline static void
schedule_unit(unit,insn,clock)1163 schedule_unit (unit, insn, clock)
1164      int unit, clock;
1165      rtx insn;
1166 {
1167   int i;
1168 
1169   if (unit >= 0)
1170     {
1171       int instance = unit;
1172 #if MAX_MULTIPLICITY > 1
1173       /* Find the first free instance of the function unit and use that
1174 	 one.  We assume that one is free.  */
1175       for (i = function_units[unit].multiplicity - 1; i > 0; i--)
1176 	{
1177 	  if (! actual_hazard_this_instance (unit, instance, insn, clock, 0))
1178 	    break;
1179 	  instance += FUNCTION_UNITS_SIZE;
1180 	}
1181 #endif
1182       unit_last_insn[instance] = insn;
1183       unit_tick[instance] = (clock + function_units[unit].max_blockage);
1184     }
1185   else
1186     for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
1187       if ((unit & 1) != 0)
1188 	schedule_unit (i, insn, clock);
1189 }
1190 
1191 /* Return the actual hazard cost of executing INSN on the units encoded by
1192    UNIT at time CLOCK if the previous actual hazard cost was COST.  */
1193 
1194 __inline static int
actual_hazard(unit,insn,clock,cost)1195 actual_hazard (unit, insn, clock, cost)
1196      int unit, clock, cost;
1197      rtx insn;
1198 {
1199   int i;
1200 
1201   if (unit >= 0)
1202     {
1203       /* Find the instance of the function unit with the minimum hazard.  */
1204       int instance = unit;
1205       int best = instance;
1206       int best_cost = actual_hazard_this_instance (unit, instance, insn,
1207 						   clock, cost);
1208       int this_cost;
1209 
1210 #if MAX_MULTIPLICITY > 1
1211       if (best_cost > cost)
1212 	{
1213 	  for (i = function_units[unit].multiplicity - 1; i > 0; i--)
1214 	    {
1215 	      instance += FUNCTION_UNITS_SIZE;
1216 	      this_cost = actual_hazard_this_instance (unit, instance, insn,
1217 						       clock, cost);
1218 	      if (this_cost < best_cost)
1219 		{
1220 		  best = instance;
1221 		  best_cost = this_cost;
1222 		  if (this_cost <= cost)
1223 		    break;
1224 		}
1225 	    }
1226 	}
1227 #endif
1228       cost = MAX (cost, best_cost);
1229     }
1230   else
1231     for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
1232       if ((unit & 1) != 0)
1233 	cost = actual_hazard (i, insn, clock, cost);
1234 
1235   return cost;
1236 }
1237 
1238 /* Return the potential hazard cost of executing an instruction on the
1239    units encoded by UNIT if the previous potential hazard cost was COST.
1240    An insn with a large blockage time is chosen in preference to one
1241    with a smaller time; an insn that uses a unit that is more likely
1242    to be used is chosen in preference to one with a unit that is less
1243    used.  We are trying to minimize a subsequent actual hazard.  */
1244 
1245 __inline static int
potential_hazard(unit,insn,cost)1246 potential_hazard (unit, insn, cost)
1247      int unit, cost;
1248      rtx insn;
1249 {
1250   int i, ncost;
1251   unsigned int minb, maxb;
1252 
1253   if (unit >= 0)
1254     {
1255       minb = maxb = function_units[unit].max_blockage;
1256       if (maxb > 1)
1257 	{
1258 	  if (function_units[unit].blockage_range_function)
1259 	    {
1260 	      maxb = minb = blockage_range (unit, insn);
1261 	      maxb = MAX_BLOCKAGE_COST (maxb);
1262 	      minb = MIN_BLOCKAGE_COST (minb);
1263 	    }
1264 
1265 	  if (maxb > 1)
1266 	    {
1267 	      /* Make the number of instructions left dominate.  Make the
1268 		 minimum delay dominate the maximum delay.  If all these
1269 		 are the same, use the unit number to add an arbitrary
1270 		 ordering.  Other terms can be added.  */
1271 	      ncost = minb * 0x40 + maxb;
1272 	      ncost *= (unit_n_insns[unit] - 1) * 0x1000 + unit;
1273 	      if (ncost > cost)
1274 		cost = ncost;
1275 	    }
1276 	}
1277     }
1278   else
1279     for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
1280       if ((unit & 1) != 0)
1281 	cost = potential_hazard (i, insn, cost);
1282 
1283   return cost;
1284 }
1285 
1286 /* Compute cost of executing INSN given the dependence LINK on the insn USED.
1287    This is the number of virtual cycles taken between instruction issue and
1288    instruction results.  */
1289 
1290 __inline static int
insn_cost(insn,link,used)1291 insn_cost (insn, link, used)
1292      rtx insn, link, used;
1293 {
1294   register int cost = INSN_COST (insn);
1295 
1296   if (cost == 0)
1297     {
1298       recog_memoized (insn);
1299 
1300       /* A USE insn, or something else we don't need to understand.
1301 	 We can't pass these directly to result_ready_cost because it will
1302 	 trigger a fatal error for unrecognizable insns.  */
1303       if (INSN_CODE (insn) < 0)
1304 	{
1305 	  INSN_COST (insn) = 1;
1306 	  return 1;
1307 	}
1308       else
1309 	{
1310 	  cost = result_ready_cost (insn);
1311 
1312 	  if (cost < 1)
1313 	    cost = 1;
1314 
1315 	  INSN_COST (insn) = cost;
1316 	}
1317     }
1318 
1319   /* A USE insn should never require the value used to be computed.  This
1320      allows the computation of a function's result and parameter values to
1321      overlap the return and call.  */
1322   recog_memoized (used);
1323   if (INSN_CODE (used) < 0)
1324     LINK_COST_FREE (link) = 1;
1325 
1326   /* If some dependencies vary the cost, compute the adjustment.  Most
1327      commonly, the adjustment is complete: either the cost is ignored
1328      (in the case of an output- or anti-dependence), or the cost is
1329      unchanged.  These values are cached in the link as LINK_COST_FREE
1330      and LINK_COST_ZERO.  */
1331 
1332   if (LINK_COST_FREE (link))
1333     cost = 1;
1334 #ifdef ADJUST_COST
1335   else if (! LINK_COST_ZERO (link))
1336     {
1337       int ncost = cost;
1338 
1339       ADJUST_COST (used, link, insn, ncost);
1340       if (ncost <= 1)
1341 	LINK_COST_FREE (link) = ncost = 1;
1342       if (cost == ncost)
1343 	LINK_COST_ZERO (link) = 1;
1344       cost = ncost;
1345     }
1346 #endif
1347   return cost;
1348 }
1349 
1350 /* Compute the priority number for INSN.  */
1351 
1352 static int
priority(insn)1353 priority (insn)
1354      rtx insn;
1355 {
1356   if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
1357     {
1358       int prev_priority;
1359       int max_priority;
1360       int this_priority = INSN_PRIORITY (insn);
1361       rtx prev;
1362 
1363       if (this_priority > 0)
1364 	return this_priority;
1365 
1366       max_priority = 1;
1367 
1368       /* Nonzero if these insns must be scheduled together.  */
1369       if (SCHED_GROUP_P (insn))
1370 	{
1371 	  prev = insn;
1372 	  while (SCHED_GROUP_P (prev))
1373 	    {
1374 	      prev = PREV_INSN (prev);
1375 	      INSN_REF_COUNT (prev) += 1;
1376 	    }
1377 	}
1378 
1379       for (prev = LOG_LINKS (insn); prev; prev = XEXP (prev, 1))
1380 	{
1381 	  rtx x = XEXP (prev, 0);
1382 
1383 	  /* A dependence pointing to a note is always obsolete, because
1384 	     sched_analyze_insn will have created any necessary new dependences
1385 	     which replace it.  Notes can be created when instructions are
1386 	     deleted by insn splitting, or by register allocation.  */
1387 	  if (GET_CODE (x) == NOTE)
1388 	    {
1389 	      remove_dependence (insn, x);
1390 	      continue;
1391 	    }
1392 
1393 	  /* Clear the link cost adjustment bits.  */
1394 	  LINK_COST_FREE (prev) = 0;
1395 #ifdef ADJUST_COST
1396 	  LINK_COST_ZERO (prev) = 0;
1397 #endif
1398 
1399 	  /* This priority calculation was chosen because it results in the
1400 	     least instruction movement, and does not hurt the performance
1401 	     of the resulting code compared to the old algorithm.
1402 	     This makes the sched algorithm more stable, which results
1403 	     in better code, because there is less register pressure,
1404 	     cross jumping is more likely to work, and debugging is easier.
1405 
1406 	     When all instructions have a latency of 1, there is no need to
1407 	     move any instructions.  Subtracting one here ensures that in such
1408 	     cases all instructions will end up with a priority of one, and
1409 	     hence no scheduling will be done.
1410 
1411 	     The original code did not subtract the one, and added the
1412 	     insn_cost of the current instruction to its priority (e.g.
1413 	     move the insn_cost call down to the end).  */
1414 
1415 	  if (REG_NOTE_KIND (prev) == 0)
1416 	    /* Data dependence.  */
1417 	    prev_priority = priority (x) + insn_cost (x, prev, insn) - 1;
1418 	  else
1419 	    /* Anti or output dependence.  Don't add the latency of this
1420 	       insn's result, because it isn't being used.  */
1421 	    prev_priority = priority (x);
1422 
1423 	  if (prev_priority > max_priority)
1424 	    max_priority = prev_priority;
1425 	  INSN_REF_COUNT (x) += 1;
1426 	}
1427 
1428       prepare_unit (insn_unit (insn));
1429       INSN_PRIORITY (insn) = max_priority;
1430       return INSN_PRIORITY (insn);
1431     }
1432   return 0;
1433 }
1434 
1435 /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
1436    them to the unused_*_list variables, so that they can be reused.  */
1437 
1438 static void
free_pending_lists()1439 free_pending_lists ()
1440 {
1441   register rtx link, prev_link;
1442 
1443   if (pending_read_insns)
1444     {
1445       prev_link = pending_read_insns;
1446       link = XEXP (prev_link, 1);
1447 
1448       while (link)
1449 	{
1450 	  prev_link = link;
1451 	  link = XEXP (link, 1);
1452 	}
1453 
1454       XEXP (prev_link, 1) = unused_insn_list;
1455       unused_insn_list = pending_read_insns;
1456       pending_read_insns = 0;
1457     }
1458 
1459   if (pending_write_insns)
1460     {
1461       prev_link = pending_write_insns;
1462       link = XEXP (prev_link, 1);
1463 
1464       while (link)
1465 	{
1466 	  prev_link = link;
1467 	  link = XEXP (link, 1);
1468 	}
1469 
1470       XEXP (prev_link, 1) = unused_insn_list;
1471       unused_insn_list = pending_write_insns;
1472       pending_write_insns = 0;
1473     }
1474 
1475   if (pending_read_mems)
1476     {
1477       prev_link = pending_read_mems;
1478       link = XEXP (prev_link, 1);
1479 
1480       while (link)
1481 	{
1482 	  prev_link = link;
1483 	  link = XEXP (link, 1);
1484 	}
1485 
1486       XEXP (prev_link, 1) = unused_expr_list;
1487       unused_expr_list = pending_read_mems;
1488       pending_read_mems = 0;
1489     }
1490 
1491   if (pending_write_mems)
1492     {
1493       prev_link = pending_write_mems;
1494       link = XEXP (prev_link, 1);
1495 
1496       while (link)
1497 	{
1498 	  prev_link = link;
1499 	  link = XEXP (link, 1);
1500 	}
1501 
1502       XEXP (prev_link, 1) = unused_expr_list;
1503       unused_expr_list = pending_write_mems;
1504       pending_write_mems = 0;
1505     }
1506 }
1507 
1508 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1509    The MEM is a memory reference contained within INSN, which we are saving
1510    so that we can do memory aliasing on it.  */
1511 
1512 static void
add_insn_mem_dependence(insn_list,mem_list,insn,mem)1513 add_insn_mem_dependence (insn_list, mem_list, insn, mem)
1514      rtx *insn_list, *mem_list, insn, mem;
1515 {
1516   register rtx link;
1517 
1518   if (unused_insn_list)
1519     {
1520       link = unused_insn_list;
1521       unused_insn_list = XEXP (link, 1);
1522     }
1523   else
1524     link = rtx_alloc (INSN_LIST);
1525   XEXP (link, 0) = insn;
1526   XEXP (link, 1) = *insn_list;
1527   *insn_list = link;
1528 
1529   if (unused_expr_list)
1530     {
1531       link = unused_expr_list;
1532       unused_expr_list = XEXP (link, 1);
1533     }
1534   else
1535     link = rtx_alloc (EXPR_LIST);
1536   XEXP (link, 0) = mem;
1537   XEXP (link, 1) = *mem_list;
1538   *mem_list = link;
1539 
1540   pending_lists_length++;
1541 }
1542 
1543 /* Make a dependency between every memory reference on the pending lists
1544    and INSN, thus flushing the pending lists.  */
1545 
1546 static void
flush_pending_lists(insn)1547 flush_pending_lists (insn)
1548      rtx insn;
1549 {
1550   rtx link;
1551 
1552   while (pending_read_insns)
1553     {
1554       add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);
1555 
1556       link = pending_read_insns;
1557       pending_read_insns = XEXP (pending_read_insns, 1);
1558       XEXP (link, 1) = unused_insn_list;
1559       unused_insn_list = link;
1560 
1561       link = pending_read_mems;
1562       pending_read_mems = XEXP (pending_read_mems, 1);
1563       XEXP (link, 1) = unused_expr_list;
1564       unused_expr_list = link;
1565     }
1566   while (pending_write_insns)
1567     {
1568       add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);
1569 
1570       link = pending_write_insns;
1571       pending_write_insns = XEXP (pending_write_insns, 1);
1572       XEXP (link, 1) = unused_insn_list;
1573       unused_insn_list = link;
1574 
1575       link = pending_write_mems;
1576       pending_write_mems = XEXP (pending_write_mems, 1);
1577       XEXP (link, 1) = unused_expr_list;
1578       unused_expr_list = link;
1579     }
1580   pending_lists_length = 0;
1581 
1582   if (last_pending_memory_flush)
1583     add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
1584 
1585   last_pending_memory_flush = insn;
1586 }
1587 
1588 /* Analyze a single SET or CLOBBER rtx, X, creating all dependencies generated
1589    by the write to the destination of X, and reads of everything mentioned.  */
1590 
1591 static void
sched_analyze_1(x,insn)1592 sched_analyze_1 (x, insn)
1593      rtx x;
1594      rtx insn;
1595 {
1596   register int regno;
1597   register rtx dest = SET_DEST (x);
1598 
1599   if (dest == 0)
1600     return;
1601 
1602   while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
1603 	 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
1604     {
1605       if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
1606 	{
1607 	  /* The second and third arguments are values read by this insn.  */
1608 	  sched_analyze_2 (XEXP (dest, 1), insn);
1609 	  sched_analyze_2 (XEXP (dest, 2), insn);
1610 	}
1611       dest = SUBREG_REG (dest);
1612     }
1613 
1614   if (GET_CODE (dest) == REG)
1615     {
1616       register int offset, bit, i;
1617 
1618       regno = REGNO (dest);
1619 
1620       /* A hard reg in a wide mode may really be multiple registers.
1621 	 If so, mark all of them just like the first.  */
1622       if (regno < FIRST_PSEUDO_REGISTER)
1623 	{
1624 	  i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
1625 	  while (--i >= 0)
1626 	    {
1627 	      rtx u;
1628 
1629 	      for (u = reg_last_uses[regno+i]; u; u = XEXP (u, 1))
1630 		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1631 	      reg_last_uses[regno + i] = 0;
1632 	      if (reg_last_sets[regno + i])
1633 		add_dependence (insn, reg_last_sets[regno + i],
1634 				REG_DEP_OUTPUT);
1635 	      reg_last_sets[regno + i] = insn;
1636 	      if ((call_used_regs[i] || global_regs[i])
1637 		  && last_function_call)
1638 		/* Function calls clobber all call_used regs.  */
1639 		add_dependence (insn, last_function_call, REG_DEP_ANTI);
1640 	    }
1641 	}
1642       else
1643 	{
1644 	  rtx u;
1645 
1646 	  for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
1647 	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1648 	  reg_last_uses[regno] = 0;
1649 	  if (reg_last_sets[regno])
1650 	    add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);
1651 	  reg_last_sets[regno] = insn;
1652 
1653 	  /* Pseudos that are REG_EQUIV to something may be replaced
1654 	     by that during reloading.  We need only add dependencies for
1655 	     the address in the REG_EQUIV note.  */
1656 	  if (! reload_completed
1657 	      && reg_known_equiv_p[regno]
1658 	      && GET_CODE (reg_known_value[regno]) == MEM)
1659 	    sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);
1660 
1661 	  /* Don't let it cross a call after scheduling if it doesn't
1662 	     already cross one.  */
1663 	  if (reg_n_calls_crossed[regno] == 0 && last_function_call)
1664 	    add_dependence (insn, last_function_call, REG_DEP_ANTI);
1665 	}
1666     }
1667   else if (GET_CODE (dest) == MEM)
1668     {
1669       /* Writing memory.  */
1670 
1671       if (pending_lists_length > 32)
1672 	{
1673 	  /* Flush all pending reads and writes to prevent the pending lists
1674 	     from getting any larger.  Insn scheduling runs too slowly when
1675 	     these lists get long.  The number 32 was chosen because it
1676 	     seems like a reasonable number.  When compiling GCC with itself,
1677 	     this flush occurs 8 times for sparc, and 10 times for m88k using
1678 	     the number 32.  */
1679 	  flush_pending_lists (insn);
1680 	}
1681       else
1682 	{
1683 	  rtx pending, pending_mem;
1684 
1685 	  pending = pending_read_insns;
1686 	  pending_mem = pending_read_mems;
1687 	  while (pending)
1688 	    {
1689 	      /* If a dependency already exists, don't create a new one.  */
1690 	      if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1691 		if (anti_dependence (XEXP (pending_mem, 0), dest))
1692 		  add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
1693 
1694 	      pending = XEXP (pending, 1);
1695 	      pending_mem = XEXP (pending_mem, 1);
1696 	    }
1697 
1698 	  pending = pending_write_insns;
1699 	  pending_mem = pending_write_mems;
1700 	  while (pending)
1701 	    {
1702 	      /* If a dependency already exists, don't create a new one.  */
1703 	      if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1704 		if (output_dependence (XEXP (pending_mem, 0), dest))
1705 		  add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1706 
1707 	      pending = XEXP (pending, 1);
1708 	      pending_mem = XEXP (pending_mem, 1);
1709 	    }
1710 
1711 	  if (last_pending_memory_flush)
1712 	    add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
1713 
1714 	  add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
1715 				   insn, dest);
1716 	}
1717       sched_analyze_2 (XEXP (dest, 0), insn);
1718     }
1719 
1720   /* Analyze reads.  */
1721   if (GET_CODE (x) == SET)
1722     sched_analyze_2 (SET_SRC (x), insn);
1723 }
1724 
1725 /* Analyze the uses of memory and registers in rtx X in INSN.  */
1726 
1727 static void
sched_analyze_2(x,insn)1728 sched_analyze_2 (x, insn)
1729      rtx x;
1730      rtx insn;
1731 {
1732   register int i;
1733   register int j;
1734   register enum rtx_code code;
1735   register char *fmt;
1736 
1737   if (x == 0)
1738     return;
1739 
1740   code = GET_CODE (x);
1741 
1742   switch (code)
1743     {
1744     case CONST_INT:
1745     case CONST_DOUBLE:
1746     case SYMBOL_REF:
1747     case CONST:
1748     case LABEL_REF:
1749       /* Ignore constants.  Note that we must handle CONST_DOUBLE here
1750 	 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
1751 	 this does not mean that this insn is using cc0.  */
1752       return;
1753 
1754 #ifdef HAVE_cc0
1755     case CC0:
1756       {
1757 	rtx link, prev;
1758 
1759 	/* There may be a note before this insn now, but all notes will
1760 	   be removed before we actually try to schedule the insns, so
1761 	   it won't cause a problem later.  We must avoid it here though.  */
1762 
1763 	/* User of CC0 depends on immediately preceding insn.  */
1764 	SCHED_GROUP_P (insn) = 1;
1765 
1766 	/* Make a copy of all dependencies on the immediately previous insn,
1767 	   and add to this insn.  This is so that all the dependencies will
1768 	   apply to the group.  Remove an explicit dependence on this insn
1769 	   as SCHED_GROUP_P now represents it.  */
1770 
1771 	prev = PREV_INSN (insn);
1772 	while (GET_CODE (prev) == NOTE)
1773 	  prev = PREV_INSN (prev);
1774 
1775 	if (find_insn_list (prev, LOG_LINKS (insn)))
1776 	  remove_dependence (insn, prev);
1777 
1778 	for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
1779 	  add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
1780 
1781 	return;
1782       }
1783 #endif
1784 
1785     case REG:
1786       {
1787 	int regno = REGNO (x);
1788 	if (regno < FIRST_PSEUDO_REGISTER)
1789 	  {
1790 	    int i;
1791 
1792 	    i = HARD_REGNO_NREGS (regno, GET_MODE (x));
1793 	    while (--i >= 0)
1794 	      {
1795 		reg_last_uses[regno + i]
1796 		  = gen_rtx (INSN_LIST, VOIDmode,
1797 			     insn, reg_last_uses[regno + i]);
1798 		if (reg_last_sets[regno + i])
1799 		  add_dependence (insn, reg_last_sets[regno + i], 0);
1800 		if ((call_used_regs[regno + i] || global_regs[regno + i])
1801 		    && last_function_call)
1802 		  /* Function calls clobber all call_used regs.  */
1803 		  add_dependence (insn, last_function_call, REG_DEP_ANTI);
1804 	      }
1805 	  }
1806 	else
1807 	  {
1808 	    reg_last_uses[regno]
1809 	      = gen_rtx (INSN_LIST, VOIDmode, insn, reg_last_uses[regno]);
1810 	    if (reg_last_sets[regno])
1811 	      add_dependence (insn, reg_last_sets[regno], 0);
1812 
1813 	    /* Pseudos that are REG_EQUIV to something may be replaced
1814 	       by that during reloading.  We need only add dependencies for
1815 	       the address in the REG_EQUIV note.  */
1816 	    if (! reload_completed
1817 		&& reg_known_equiv_p[regno]
1818 		&& GET_CODE (reg_known_value[regno]) == MEM)
1819 	      sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);
1820 
1821 	    /* If the register does not already cross any calls, then add this
1822 	       insn to the sched_before_next_call list so that it will still
1823 	       not cross calls after scheduling.  */
1824 	    if (reg_n_calls_crossed[regno] == 0)
1825 	      add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
1826 	  }
1827 	return;
1828       }
1829 
1830     case MEM:
1831       {
1832 	/* Reading memory.  */
1833 
1834 	rtx pending, pending_mem;
1835 
1836 	pending = pending_read_insns;
1837 	pending_mem = pending_read_mems;
1838 	while (pending)
1839 	  {
1840 	    /* If a dependency already exists, don't create a new one.  */
1841 	    if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1842 	      if (read_dependence (XEXP (pending_mem, 0), x))
1843 		add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
1844 
1845 	    pending = XEXP (pending, 1);
1846 	    pending_mem = XEXP (pending_mem, 1);
1847 	  }
1848 
1849 	pending = pending_write_insns;
1850 	pending_mem = pending_write_mems;
1851 	while (pending)
1852 	  {
1853 	    /* If a dependency already exists, don't create a new one.  */
1854 	    if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1855 	      if (true_dependence (XEXP (pending_mem, 0), x))
1856 		add_dependence (insn, XEXP (pending, 0), 0);
1857 
1858 	    pending = XEXP (pending, 1);
1859 	    pending_mem = XEXP (pending_mem, 1);
1860 	  }
1861 	if (last_pending_memory_flush)
1862 	  add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
1863 
1864 	/* Always add these dependencies to pending_reads, since
1865 	   this insn may be followed by a write.  */
1866 	add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
1867 				 insn, x);
1868 
1869 	/* Take advantage of tail recursion here.  */
1870 	sched_analyze_2 (XEXP (x, 0), insn);
1871 	return;
1872       }
1873 
1874     case ASM_OPERANDS:
1875     case ASM_INPUT:
1876     case UNSPEC_VOLATILE:
1877     case TRAP_IF:
1878       {
1879 	rtx u;
1880 
1881 	/* Traditional and volatile asm instructions must be considered to use
1882 	   and clobber all hard registers and all of memory.  So must
1883 	   TRAP_IF and UNSPEC_VOLATILE operations.  */
1884 	if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
1885 	  {
1886 	    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1887 	      {
1888 		for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
1889 		  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1890 		reg_last_uses[i] = 0;
1891 		if (reg_last_sets[i])
1892 		  add_dependence (insn, reg_last_sets[i], 0);
1893 		reg_last_sets[i] = insn;
1894 	      }
1895 
1896 	    flush_pending_lists (insn);
1897 	  }
1898 
1899 	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
1900 	   We can not just fall through here since then we would be confused
1901 	   by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
1902 	   traditional asms unlike their normal usage.  */
1903 
1904 	if (code == ASM_OPERANDS)
1905 	  {
1906 	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
1907 	      sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
1908 	    return;
1909 	  }
1910 	break;
1911       }
1912 
1913     case PRE_DEC:
1914     case POST_DEC:
1915     case PRE_INC:
1916     case POST_INC:
1917       /* These both read and modify the result.  We must handle them as writes
1918 	 to get proper dependencies for following instructions.  We must handle
1919 	 them as reads to get proper dependencies from this to previous
1920 	 instructions.  Thus we need to pass them to both sched_analyze_1
1921 	 and sched_analyze_2.  We must call sched_analyze_2 first in order
1922 	 to get the proper antecedent for the read.  */
1923       sched_analyze_2 (XEXP (x, 0), insn);
1924       sched_analyze_1 (x, insn);
1925       return;
1926     }
1927 
1928   /* Other cases: walk the insn.  */
1929   fmt = GET_RTX_FORMAT (code);
1930   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1931     {
1932       if (fmt[i] == 'e')
1933 	sched_analyze_2 (XEXP (x, i), insn);
1934       else if (fmt[i] == 'E')
1935 	for (j = 0; j < XVECLEN (x, i); j++)
1936 	  sched_analyze_2 (XVECEXP (x, i, j), insn);
1937     }
1938 }
1939 
1940 /* Analyze an INSN with pattern X to find all dependencies.  */
1941 
1942 static void
sched_analyze_insn(x,insn)1943 sched_analyze_insn (x, insn)
1944      rtx x, insn;
1945 {
1946   register RTX_CODE code = GET_CODE (x);
1947   rtx link;
1948 
1949   if (code == SET || code == CLOBBER)
1950     sched_analyze_1 (x, insn);
1951   else if (code == PARALLEL)
1952     {
1953       register int i;
1954       for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1955 	{
1956 	  code = GET_CODE (XVECEXP (x, 0, i));
1957 	  if (code == SET || code == CLOBBER)
1958 	    sched_analyze_1 (XVECEXP (x, 0, i), insn);
1959 	  else
1960 	    sched_analyze_2 (XVECEXP (x, 0, i), insn);
1961 	}
1962     }
1963   else
1964     sched_analyze_2 (x, insn);
1965 
1966   /* Handle function calls.  */
1967   if (GET_CODE (insn) == CALL_INSN)
1968     {
1969       rtx dep_insn;
1970       rtx prev_dep_insn;
1971 
1972       /* When scheduling instructions, we make sure calls don't lose their
1973 	 accompanying USE insns by depending them one on another in order.   */
1974 
1975       prev_dep_insn = insn;
1976       dep_insn = PREV_INSN (insn);
1977       while (GET_CODE (dep_insn) == INSN
1978 	     && GET_CODE (PATTERN (dep_insn)) == USE)
1979 	{
1980 	  SCHED_GROUP_P (prev_dep_insn) = 1;
1981 
1982 	  /* Make a copy of all dependencies on dep_insn, and add to insn.
1983 	     This is so that all of the dependencies will apply to the
1984 	     group.  */
1985 
1986 	  for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
1987 	    add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
1988 
1989 	  prev_dep_insn = dep_insn;
1990 	  dep_insn = PREV_INSN (dep_insn);
1991 	}
1992     }
1993 }
1994 
1995 /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1996    for every dependency.  */
1997 
1998 static int
sched_analyze(head,tail)1999 sched_analyze (head, tail)
2000      rtx head, tail;
2001 {
2002   register rtx insn;
2003   register int n_insns = 0;
2004   register rtx u;
2005   register int luid = 0;
2006 
2007   for (insn = head; ; insn = NEXT_INSN (insn))
2008     {
2009       INSN_LUID (insn) = luid++;
2010 
2011       if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2012 	{
2013 	  sched_analyze_insn (PATTERN (insn), insn);
2014 	  n_insns += 1;
2015 	}
2016       else if (GET_CODE (insn) == CALL_INSN)
2017 	{
2018 	  rtx dest = 0;
2019 	  rtx x;
2020 	  register int i;
2021 
2022 	  /* Any instruction using a hard register which may get clobbered
2023 	     by a call needs to be marked as dependent on this call.
2024 	     This prevents a use of a hard return reg from being moved
2025 	     past a void call (i.e. it does not explicitly set the hard
2026 	     return reg).  */
2027 
2028 	  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2029 	    if (call_used_regs[i] || global_regs[i])
2030 	      {
2031 		for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
2032 		  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2033 		reg_last_uses[i] = 0;
2034 		if (reg_last_sets[i])
2035 		  add_dependence (insn, reg_last_sets[i], REG_DEP_ANTI);
2036 		reg_last_sets[i] = insn;
2037 		/* Insn, being a CALL_INSN, magically depends on
2038 		   `last_function_call' already.  */
2039 	      }
2040 
2041 	  /* For each insn which shouldn't cross a call, add a dependence
2042 	     between that insn and this call insn.  */
2043 	  x = LOG_LINKS (sched_before_next_call);
2044 	  while (x)
2045 	    {
2046 	      add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
2047 	      x = XEXP (x, 1);
2048 	    }
2049 	  LOG_LINKS (sched_before_next_call) = 0;
2050 
2051 	  sched_analyze_insn (PATTERN (insn), insn);
2052 
2053 	  /* We don't need to flush memory for a function call which does
2054 	     not involve memory.  */
2055 	  if (! CONST_CALL_P (insn))
2056 	    {
2057 	      /* In the absence of interprocedural alias analysis,
2058 		 we must flush all pending reads and writes, and
2059 		 start new dependencies starting from here.  */
2060 	      flush_pending_lists (insn);
2061 	    }
2062 
2063 	  /* Depend this function call (actually, the user of this
2064 	     function call) on all hard register clobberage.  */
2065 	  last_function_call = insn;
2066 	  n_insns += 1;
2067 	}
2068 
2069       if (insn == tail)
2070 	return n_insns;
2071     }
2072 }
2073 
2074 /* Called when we see a set of a register.  If death is true, then we are
2075    scanning backwards.  Mark that register as unborn.  If nobody says
2076    otherwise, that is how things will remain.  If death is false, then we
2077    are scanning forwards.  Mark that register as being born.  */
2078 
2079 static void
sched_note_set(b,x,death)2080 sched_note_set (b, x, death)
2081      int b;
2082      rtx x;
2083      int death;
2084 {
2085   register int regno, j;
2086   register rtx reg = SET_DEST (x);
2087   int subreg_p = 0;
2088 
2089   if (reg == 0)
2090     return;
2091 
2092   while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == STRICT_LOW_PART
2093 	 || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == ZERO_EXTRACT)
2094     {
2095       /* Must treat modification of just one hardware register of a multi-reg
2096 	 value or just a byte field of a register exactly the same way that
2097 	 mark_set_1 in flow.c does, i.e. anything except a paradoxical subreg
2098 	 does not kill the entire register.  */
2099       if (GET_CODE (reg) != SUBREG
2100 	  || REG_SIZE (SUBREG_REG (reg)) > REG_SIZE (reg))
2101 	subreg_p = 1;
2102 
2103       reg = SUBREG_REG (reg);
2104     }
2105 
2106   if (GET_CODE (reg) != REG)
2107     return;
2108 
2109   /* Global registers are always live, so the code below does not apply
2110      to them.  */
2111 
2112   regno = REGNO (reg);
2113   if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
2114     {
2115       register int offset = regno / REGSET_ELT_BITS;
2116       register REGSET_ELT_TYPE bit
2117 	= (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
2118 
2119       if (death)
2120 	{
2121 	  /* If we only set part of the register, then this set does not
2122 	     kill it.  */
2123 	  if (subreg_p)
2124 	    return;
2125 
2126 	  /* Try killing this register.  */
2127 	  if (regno < FIRST_PSEUDO_REGISTER)
2128 	    {
2129 	      int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
2130 	      while (--j >= 0)
2131 		{
2132 		  offset = (regno + j) / REGSET_ELT_BITS;
2133 		  bit = (REGSET_ELT_TYPE) 1 << ((regno + j) % REGSET_ELT_BITS);
2134 
2135 		  bb_live_regs[offset] &= ~bit;
2136 		  bb_dead_regs[offset] |= bit;
2137 		}
2138 	    }
2139 	  else
2140 	    {
2141 	      bb_live_regs[offset] &= ~bit;
2142 	      bb_dead_regs[offset] |= bit;
2143 	    }
2144 	}
2145       else
2146 	{
2147 	  /* Make the register live again.  */
2148 	  if (regno < FIRST_PSEUDO_REGISTER)
2149 	    {
2150 	      int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
2151 	      while (--j >= 0)
2152 		{
2153 		  offset = (regno + j) / REGSET_ELT_BITS;
2154 		  bit = (REGSET_ELT_TYPE) 1 << ((regno + j) % REGSET_ELT_BITS);
2155 
2156 		  bb_live_regs[offset] |= bit;
2157 		  bb_dead_regs[offset] &= ~bit;
2158 		}
2159 	    }
2160 	  else
2161 	    {
2162 	      bb_live_regs[offset] |= bit;
2163 	      bb_dead_regs[offset] &= ~bit;
2164 	    }
2165 	}
2166     }
2167 }
2168 
2169 /* Macros and functions for keeping the priority queue sorted, and
2170    dealing with queueing and unqueueing of instructions.  */
2171 
2172 #define SCHED_SORT(READY, NEW_READY, OLD_READY) \
2173   do { if ((NEW_READY) - (OLD_READY) == 1)				\
2174 	 swap_sort (READY, NEW_READY);					\
2175        else if ((NEW_READY) - (OLD_READY) > 1)				\
2176 	 qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); }	\
2177   while (0)
2178 
2179 /* Returns a positive value if y is preferred; returns a negative value if
2180    x is preferred.  Should never return 0, since that will make the sort
2181    unstable.  */
2182 
2183 static int
rank_for_schedule(x,y)2184 rank_for_schedule (x, y)
2185      rtx *x, *y;
2186 {
2187   rtx tmp = *y;
2188   rtx tmp2 = *x;
2189   rtx link;
2190   int tmp_class, tmp2_class;
2191   int value;
2192 
2193   /* Choose the instruction with the highest priority, if different.  */
2194   if (value = INSN_PRIORITY (tmp) - INSN_PRIORITY (tmp2))
2195     return value;
2196 
2197   if (last_scheduled_insn)
2198     {
2199       /* Classify the instructions into three classes:
2200 	 1) Data dependent on last schedule insn.
2201 	 2) Anti/Output dependent on last scheduled insn.
2202 	 3) Independent of last scheduled insn, or has latency of one.
2203 	 Choose the insn from the highest numbered class if different.  */
2204       link = find_insn_list (tmp, LOG_LINKS (last_scheduled_insn));
2205       if (link == 0 || insn_cost (tmp, link, last_scheduled_insn) == 1)
2206 	tmp_class = 3;
2207       else if (REG_NOTE_KIND (link) == 0) /* Data dependence.  */
2208 	tmp_class = 1;
2209       else
2210 	tmp_class = 2;
2211 
2212       link = find_insn_list (tmp2, LOG_LINKS (last_scheduled_insn));
2213       if (link == 0 || insn_cost (tmp2, link, last_scheduled_insn) == 1)
2214 	tmp2_class = 3;
2215       else if (REG_NOTE_KIND (link) == 0) /* Data dependence.  */
2216 	tmp2_class = 1;
2217       else
2218 	tmp2_class = 2;
2219 
2220       if (value = tmp_class - tmp2_class)
2221 	return value;
2222     }
2223 
2224   /* If insns are equally good, sort by INSN_LUID (original insn order),
2225      so that we make the sort stable.  This minimizes instruction movement,
2226      thus minimizing sched's effect on debugging and cross-jumping.  */
2227   return INSN_LUID (tmp) - INSN_LUID (tmp2);
2228 }
2229 
2230 /* Resort the array A in which only element at index N may be out of order.  */
2231 
2232 __inline static void
swap_sort(a,n)2233 swap_sort (a, n)
2234      rtx *a;
2235      int n;
2236 {
2237   rtx insn = a[n-1];
2238   int i = n-2;
2239 
2240   while (i >= 0 && rank_for_schedule (a+i, &insn) >= 0)
2241     {
2242       a[i+1] = a[i];
2243       i -= 1;
2244     }
2245   a[i+1] = insn;
2246 }
2247 
2248 static int max_priority;
2249 
2250 /* Add INSN to the insn queue so that it fires at least N_CYCLES
2251    before the currently executing insn.  */
2252 
2253 __inline static void
queue_insn(insn,n_cycles)2254 queue_insn (insn, n_cycles)
2255      rtx insn;
2256      int n_cycles;
2257 {
2258   int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
2259   NEXT_INSN (insn) = insn_queue[next_q];
2260   insn_queue[next_q] = insn;
2261   q_size += 1;
2262 }
2263 
2264 /* Return nonzero if PAT is the pattern of an insn which makes a
2265    register live.  */
2266 
2267 __inline static int
birthing_insn_p(pat)2268 birthing_insn_p (pat)
2269      rtx pat;
2270 {
2271   int j;
2272 
2273   if (reload_completed == 1)
2274     return 0;
2275 
2276   if (GET_CODE (pat) == SET
2277       && GET_CODE (SET_DEST (pat)) == REG)
2278     {
2279       rtx dest = SET_DEST (pat);
2280       int i = REGNO (dest);
2281       int offset = i / REGSET_ELT_BITS;
2282       REGSET_ELT_TYPE bit = (REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS);
2283 
2284       /* It would be more accurate to use refers_to_regno_p or
2285 	 reg_mentioned_p to determine when the dest is not live before this
2286 	 insn.  */
2287 
2288       if (bb_live_regs[offset] & bit)
2289 	return (reg_n_sets[i] == 1);
2290 
2291       return 0;
2292     }
2293   if (GET_CODE (pat) == PARALLEL)
2294     {
2295       for (j = 0; j < XVECLEN (pat, 0); j++)
2296 	if (birthing_insn_p (XVECEXP (pat, 0, j)))
2297 	  return 1;
2298     }
2299   return 0;
2300 }
2301 
2302 /* PREV is an insn that is ready to execute.  Adjust its priority if that
2303    will help shorten register lifetimes.  */
2304 
2305 __inline static void
adjust_priority(prev)2306 adjust_priority (prev)
2307      rtx prev;
2308 {
2309   /* Trying to shorten register lives after reload has completed
2310      is useless and wrong.  It gives inaccurate schedules.  */
2311   if (reload_completed == 0)
2312     {
2313       rtx note;
2314       int n_deaths = 0;
2315 
2316       /* ??? This code has no effect, because REG_DEAD notes are removed
2317 	 before we ever get here.  */
2318       for (note = REG_NOTES (prev); note; note = XEXP (note, 1))
2319 	if (REG_NOTE_KIND (note) == REG_DEAD)
2320 	  n_deaths += 1;
2321 
2322       /* Defer scheduling insns which kill registers, since that
2323 	 shortens register lives.  Prefer scheduling insns which
2324 	 make registers live for the same reason.  */
2325       switch (n_deaths)
2326 	{
2327 	default:
2328 	  INSN_PRIORITY (prev) >>= 3;
2329 	  break;
2330 	case 3:
2331 	  INSN_PRIORITY (prev) >>= 2;
2332 	  break;
2333 	case 2:
2334 	case 1:
2335 	  INSN_PRIORITY (prev) >>= 1;
2336 	  break;
2337 	case 0:
2338 	  if (birthing_insn_p (PATTERN (prev)))
2339 	    {
2340 	      int max = max_priority;
2341 
2342 	      if (max > INSN_PRIORITY (prev))
2343 		INSN_PRIORITY (prev) = max;
2344 	    }
2345 	  break;
2346 	}
2347     }
2348 }
2349 
2350 /* INSN is the "currently executing insn".  Launch each insn which was
2351    waiting on INSN (in the backwards dataflow sense).  READY is a
2352    vector of insns which are ready to fire.  N_READY is the number of
2353    elements in READY.  CLOCK is the current virtual cycle.  */
2354 
2355 static int
schedule_insn(insn,ready,n_ready,clock)2356 schedule_insn (insn, ready, n_ready, clock)
2357      rtx insn;
2358      rtx *ready;
2359      int n_ready;
2360      int clock;
2361 {
2362   rtx link;
2363   int new_ready = n_ready;
2364 
2365   if (MAX_BLOCKAGE > 1)
2366     schedule_unit (insn_unit (insn), insn, clock);
2367 
2368   if (LOG_LINKS (insn) == 0)
2369     return n_ready;
2370 
2371   /* This is used by the function adjust_priority above.  */
2372   if (n_ready > 0)
2373     max_priority = MAX (INSN_PRIORITY (ready[0]), INSN_PRIORITY (insn));
2374   else
2375     max_priority = INSN_PRIORITY (insn);
2376 
2377   for (link = LOG_LINKS (insn); link != 0; link = XEXP (link, 1))
2378     {
2379       rtx prev = XEXP (link, 0);
2380       int cost = insn_cost (prev, link, insn);
2381 
2382       if ((INSN_REF_COUNT (prev) -= 1) != 0)
2383 	{
2384 	  /* We satisfied one requirement to fire PREV.  Record the earliest
2385 	     time when PREV can fire.  No need to do this if the cost is 1,
2386 	     because PREV can fire no sooner than the next cycle.  */
2387 	  if (cost > 1)
2388 	    INSN_TICK (prev) = MAX (INSN_TICK (prev), clock + cost);
2389 	}
2390       else
2391 	{
2392 	  /* We satisfied the last requirement to fire PREV.  Ensure that all
2393 	     timing requirements are satisfied.  */
2394 	  if (INSN_TICK (prev) - clock > cost)
2395 	    cost = INSN_TICK (prev) - clock;
2396 
2397 	  /* Adjust the priority of PREV and either put it on the ready
2398 	     list or queue it.  */
2399 	  adjust_priority (prev);
2400 	  if (cost <= 1)
2401 	    ready[new_ready++] = prev;
2402 	  else
2403 	    queue_insn (prev, cost);
2404 	}
2405     }
2406 
2407   return new_ready;
2408 }
2409 
2410 /* Given N_READY insns in the ready list READY at time CLOCK, queue
2411    those that are blocked due to function unit hazards and rearrange
2412    the remaining ones to minimize subsequent function unit hazards.  */
2413 
2414 static int
schedule_select(ready,n_ready,clock,file)2415 schedule_select (ready, n_ready, clock, file)
2416      rtx *ready;
2417      int n_ready, clock;
2418      FILE *file;
2419 {
2420   int pri = INSN_PRIORITY (ready[0]);
2421   int i, j, k, q, cost, best_cost, best_insn = 0, new_ready = n_ready;
2422   rtx insn;
2423 
2424   /* Work down the ready list in groups of instructions with the same
2425      priority value.  Queue insns in the group that are blocked and
2426      select among those that remain for the one with the largest
2427      potential hazard.  */
2428   for (i = 0; i < n_ready; i = j)
2429     {
2430       int opri = pri;
2431       for (j = i + 1; j < n_ready; j++)
2432 	if ((pri = INSN_PRIORITY (ready[j])) != opri)
2433 	  break;
2434 
2435       /* Queue insns in the group that are blocked.  */
2436       for (k = i, q = 0; k < j; k++)
2437 	{
2438 	  insn = ready[k];
2439 	  if ((cost = actual_hazard (insn_unit (insn), insn, clock, 0)) != 0)
2440 	    {
2441 	      q++;
2442 	      ready[k] = 0;
2443 	      queue_insn (insn, cost);
2444 	      if (file)
2445 		fprintf (file, "\n;; blocking insn %d for %d cycles",
2446 			 INSN_UID (insn), cost);
2447 	    }
2448 	}
2449       new_ready -= q;
2450 
2451       /* Check the next group if all insns were queued.  */
2452       if (j - i - q == 0)
2453 	continue;
2454 
2455       /* If more than one remains, select the first one with the largest
2456 	 potential hazard.  */
2457       else if (j - i - q > 1)
2458 	{
2459 	  best_cost = -1;
2460 	  for (k = i; k < j; k++)
2461 	    {
2462 	      if ((insn = ready[k]) == 0)
2463 		continue;
2464 	      if ((cost = potential_hazard (insn_unit (insn), insn, 0))
2465 		  > best_cost)
2466 		{
2467 		  best_cost = cost;
2468 		  best_insn = k;
2469 		}
2470 	    }
2471 	}
2472       /* We have found a suitable insn to schedule.  */
2473       break;
2474     }
2475 
2476   /* Move the best insn to be front of the ready list.  */
2477   if (best_insn != 0)
2478     {
2479       if (file)
2480 	{
2481 	  fprintf (file, ", now");
2482 	  for (i = 0; i < n_ready; i++)
2483 	    if (ready[i])
2484 	      fprintf (file, " %d", INSN_UID (ready[i]));
2485 	  fprintf (file, "\n;; insn %d has a greater potential hazard",
2486 		   INSN_UID (ready[best_insn]));
2487 	}
2488       for (i = best_insn; i > 0; i--)
2489 	{
2490 	  insn = ready[i-1];
2491 	  ready[i-1] = ready[i];
2492 	  ready[i] = insn;
2493 	}
2494     }
2495 
2496   /* Compact the ready list.  */
2497   if (new_ready < n_ready)
2498     for (i = j = 0; i < n_ready; i++)
2499       if (ready[i])
2500 	ready[j++] = ready[i];
2501 
2502   return new_ready;
2503 }
2504 
2505 /* Add a REG_DEAD note for REG to INSN, reusing a REG_DEAD note from the
2506    dead_notes list.  */
2507 
2508 static void
create_reg_dead_note(reg,insn)2509 create_reg_dead_note (reg, insn)
2510      rtx reg, insn;
2511 {
2512   rtx link, backlink;
2513 
2514   /* The number of registers killed after scheduling must be the same as the
2515      number of registers killed before scheduling.  The number of REG_DEAD
2516      notes may not be conserved, i.e. two SImode hard register REG_DEAD notes
2517      might become one DImode hard register REG_DEAD note, but the number of
2518      registers killed will be conserved.
2519 
2520      We carefully remove REG_DEAD notes from the dead_notes list, so that
2521      there will be none left at the end.  If we run out early, then there
2522      is a bug somewhere in flow, combine and/or sched.  */
2523 
2524   if (dead_notes == 0)
2525     {
2526 #if 1
2527       abort ();
2528 #else
2529       link = rtx_alloc (EXPR_LIST);
2530       PUT_REG_NOTE_KIND (link, REG_DEAD);
2531 #endif
2532     }
2533   else
2534     {
2535       /* Number of regs killed by REG.  */
2536       int regs_killed = (REGNO (reg) >= FIRST_PSEUDO_REGISTER ? 1
2537 			 : HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)));
2538       /* Number of regs killed by REG_DEAD notes taken off the list.  */
2539       int reg_note_regs;
2540 
2541       link = dead_notes;
2542       reg_note_regs = (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
2543 		       : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
2544 					   GET_MODE (XEXP (link, 0))));
2545       while (reg_note_regs < regs_killed)
2546 	{
2547 	  link = XEXP (link, 1);
2548 	  reg_note_regs += (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
2549 			    : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
2550 						GET_MODE (XEXP (link, 0))));
2551 	}
2552       dead_notes = XEXP (link, 1);
2553 
2554       /* If we took too many regs kills off, put the extra ones back.  */
2555       while (reg_note_regs > regs_killed)
2556 	{
2557 	  rtx temp_reg, temp_link;
2558 
2559 	  temp_reg = gen_rtx (REG, word_mode, 0);
2560 	  temp_link = rtx_alloc (EXPR_LIST);
2561 	  PUT_REG_NOTE_KIND (temp_link, REG_DEAD);
2562 	  XEXP (temp_link, 0) = temp_reg;
2563 	  XEXP (temp_link, 1) = dead_notes;
2564 	  dead_notes = temp_link;
2565 	  reg_note_regs--;
2566 	}
2567     }
2568 
2569   XEXP (link, 0) = reg;
2570   XEXP (link, 1) = REG_NOTES (insn);
2571   REG_NOTES (insn) = link;
2572 }
2573 
2574 /* Subroutine on attach_deaths_insn--handles the recursive search
2575    through INSN.  If SET_P is true, then x is being modified by the insn.  */
2576 
2577 static void
attach_deaths(x,insn,set_p)2578 attach_deaths (x, insn, set_p)
2579      rtx x;
2580      rtx insn;
2581      int set_p;
2582 {
2583   register int i;
2584   register int j;
2585   register enum rtx_code code;
2586   register char *fmt;
2587 
2588   if (x == 0)
2589     return;
2590 
2591   code = GET_CODE (x);
2592 
2593   switch (code)
2594     {
2595     case CONST_INT:
2596     case CONST_DOUBLE:
2597     case LABEL_REF:
2598     case SYMBOL_REF:
2599     case CONST:
2600     case CODE_LABEL:
2601     case PC:
2602     case CC0:
2603       /* Get rid of the easy cases first.  */
2604       return;
2605 
2606     case REG:
2607       {
2608 	/* If the register dies in this insn, queue that note, and mark
2609 	   this register as needing to die.  */
2610 	/* This code is very similar to mark_used_1 (if set_p is false)
2611 	   and mark_set_1 (if set_p is true) in flow.c.  */
2612 
2613 	register int regno = REGNO (x);
2614 	register int offset = regno / REGSET_ELT_BITS;
2615 	register REGSET_ELT_TYPE bit
2616 	  = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
2617 	REGSET_ELT_TYPE all_needed = (old_live_regs[offset] & bit);
2618 	REGSET_ELT_TYPE some_needed = (old_live_regs[offset] & bit);
2619 
2620 	if (set_p)
2621 	  return;
2622 
2623 	if (regno < FIRST_PSEUDO_REGISTER)
2624 	  {
2625 	    int n;
2626 
2627 	    n = HARD_REGNO_NREGS (regno, GET_MODE (x));
2628 	    while (--n > 0)
2629 	      {
2630 		some_needed |= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
2631 				& ((REGSET_ELT_TYPE) 1
2632 				   << ((regno + n) % REGSET_ELT_BITS)));
2633 		all_needed &= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
2634 			       & ((REGSET_ELT_TYPE) 1
2635 				  << ((regno + n) % REGSET_ELT_BITS)));
2636 	      }
2637 	  }
2638 
2639 	/* If it wasn't live before we started, then add a REG_DEAD note.
2640 	   We must check the previous lifetime info not the current info,
2641 	   because we may have to execute this code several times, e.g.
2642 	   once for a clobber (which doesn't add a note) and later
2643 	   for a use (which does add a note).
2644 
2645 	   Always make the register live.  We must do this even if it was
2646 	   live before, because this may be an insn which sets and uses
2647 	   the same register, in which case the register has already been
2648 	   killed, so we must make it live again.
2649 
2650 	   Global registers are always live, and should never have a REG_DEAD
2651 	   note added for them, so none of the code below applies to them.  */
2652 
2653 	if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
2654 	  {
2655 	    /* Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2656 	       STACK_POINTER_REGNUM, since these are always considered to be
2657 	       live.  Similarly for ARG_POINTER_REGNUM if it is fixed.  */
2658 	    if (regno != FRAME_POINTER_REGNUM
2659 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2660 		&& ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
2661 #endif
2662 		&& regno != STACK_POINTER_REGNUM)
2663 	      {
2664 		if (! all_needed && ! dead_or_set_p (insn, x))
2665 		  {
2666 		    /* If none of the words in X is needed, make a REG_DEAD
2667 		       note.  Otherwise, we must make partial REG_DEAD
2668 		       notes.  */
2669 		    if (! some_needed)
2670 		      create_reg_dead_note (x, insn);
2671 		    else
2672 		      {
2673 			int i;
2674 
2675 			/* Don't make a REG_DEAD note for a part of a
2676 			   register that is set in the insn.  */
2677 			for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
2678 			     i >= 0; i--)
2679 			  if ((old_live_regs[(regno + i) / REGSET_ELT_BITS]
2680 			       & ((REGSET_ELT_TYPE) 1
2681 				  << ((regno +i) % REGSET_ELT_BITS))) == 0
2682 			      && ! dead_or_set_regno_p (insn, regno + i))
2683 			    create_reg_dead_note (gen_rtx (REG, word_mode,
2684 							   regno + i),
2685 						  insn);
2686 		      }
2687 		  }
2688 	      }
2689 
2690 	    if (regno < FIRST_PSEUDO_REGISTER)
2691 	      {
2692 		int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
2693 		while (--j >= 0)
2694 		  {
2695 		    offset = (regno + j) / REGSET_ELT_BITS;
2696 		    bit
2697 		      = (REGSET_ELT_TYPE) 1 << ((regno + j) % REGSET_ELT_BITS);
2698 
2699 		    bb_dead_regs[offset] &= ~bit;
2700 		    bb_live_regs[offset] |= bit;
2701 		  }
2702 	      }
2703 	    else
2704 	      {
2705 		bb_dead_regs[offset] &= ~bit;
2706 		bb_live_regs[offset] |= bit;
2707 	      }
2708 	  }
2709 	return;
2710       }
2711 
2712     case MEM:
2713       /* Handle tail-recursive case.  */
2714       attach_deaths (XEXP (x, 0), insn, 0);
2715       return;
2716 
2717     case SUBREG:
2718     case STRICT_LOW_PART:
2719       /* These two cases preserve the value of SET_P, so handle them
2720 	 separately.  */
2721       attach_deaths (XEXP (x, 0), insn, set_p);
2722       return;
2723 
2724     case ZERO_EXTRACT:
2725     case SIGN_EXTRACT:
2726       /* This case preserves the value of SET_P for the first operand, but
2727 	 clears it for the other two.  */
2728       attach_deaths (XEXP (x, 0), insn, set_p);
2729       attach_deaths (XEXP (x, 1), insn, 0);
2730       attach_deaths (XEXP (x, 2), insn, 0);
2731       return;
2732 
2733     default:
2734       /* Other cases: walk the insn.  */
2735       fmt = GET_RTX_FORMAT (code);
2736       for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2737 	{
2738 	  if (fmt[i] == 'e')
2739 	    attach_deaths (XEXP (x, i), insn, 0);
2740 	  else if (fmt[i] == 'E')
2741 	    for (j = 0; j < XVECLEN (x, i); j++)
2742 	      attach_deaths (XVECEXP (x, i, j), insn, 0);
2743 	}
2744     }
2745 }
2746 
2747 /* After INSN has executed, add register death notes for each register
2748    that is dead after INSN.  */
2749 
2750 static void
attach_deaths_insn(insn)2751 attach_deaths_insn (insn)
2752      rtx insn;
2753 {
2754   rtx x = PATTERN (insn);
2755   register RTX_CODE code = GET_CODE (x);
2756 
2757   if (code == SET)
2758     {
2759       attach_deaths (SET_SRC (x), insn, 0);
2760 
2761       /* A register might die here even if it is the destination, e.g.
2762 	 it is the target of a volatile read and is otherwise unused.
2763 	 Hence we must always call attach_deaths for the SET_DEST.  */
2764       attach_deaths (SET_DEST (x), insn, 1);
2765     }
2766   else if (code == PARALLEL)
2767     {
2768       register int i;
2769       for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
2770 	{
2771 	  code = GET_CODE (XVECEXP (x, 0, i));
2772 	  if (code == SET)
2773 	    {
2774 	      attach_deaths (SET_SRC (XVECEXP (x, 0, i)), insn, 0);
2775 
2776 	      attach_deaths (SET_DEST (XVECEXP (x, 0, i)), insn, 1);
2777 	    }
2778 	  /* Flow does not add REG_DEAD notes to registers that die in
2779 	     clobbers, so we can't either.  */
2780 	  else if (code != CLOBBER)
2781 	    attach_deaths (XVECEXP (x, 0, i), insn, 0);
2782 	}
2783     }
2784   /* Flow does not add REG_DEAD notes to registers that die in
2785      clobbers, so we can't either.  */
2786   else if (code != CLOBBER)
2787     attach_deaths (x, insn, 0);
2788 }
2789 
2790 /* Delete notes beginning with INSN and maybe put them in the chain
2791    of notes ended by NOTE_LIST.
2792    Returns the insn following the notes.  */
2793 
2794 static rtx
unlink_notes(insn,tail)2795 unlink_notes (insn, tail)
2796      rtx insn, tail;
2797 {
2798   rtx prev = PREV_INSN (insn);
2799 
2800   while (insn != tail && GET_CODE (insn) == NOTE)
2801     {
2802       rtx next = NEXT_INSN (insn);
2803       /* Delete the note from its current position.  */
2804       if (prev)
2805 	NEXT_INSN (prev) = next;
2806       if (next)
2807 	PREV_INSN (next) = prev;
2808 
2809       if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
2810 	/* Record line-number notes so they can be reused.  */
2811 	LINE_NOTE (insn) = insn;
2812       else
2813 	{
2814 	  /* Insert the note at the end of the notes list.  */
2815 	  PREV_INSN (insn) = note_list;
2816 	  if (note_list)
2817 	    NEXT_INSN (note_list) = insn;
2818 	  note_list = insn;
2819 	}
2820 
2821       insn = next;
2822     }
2823   return insn;
2824 }
2825 
2826 /* Data structure for keeping track of register information
2827    during that register's life.  */
2828 
2829 struct sometimes
2830 {
2831   short offset; short bit;
2832   short live_length; short calls_crossed;
2833 };
2834 
2835 /* Constructor for `sometimes' data structure.  */
2836 
2837 static int
new_sometimes_live(regs_sometimes_live,offset,bit,sometimes_max)2838 new_sometimes_live (regs_sometimes_live, offset, bit, sometimes_max)
2839      struct sometimes *regs_sometimes_live;
2840      int offset, bit;
2841      int sometimes_max;
2842 {
2843   register struct sometimes *p;
2844   register int regno = offset * REGSET_ELT_BITS + bit;
2845   int i;
2846 
2847   /* There should never be a register greater than max_regno here.  If there
2848      is, it means that a define_split has created a new pseudo reg.  This
2849      is not allowed, since there will not be flow info available for any
2850      new register, so catch the error here.  */
2851   if (regno >= max_regno)
2852     abort ();
2853 
2854   p = &regs_sometimes_live[sometimes_max];
2855   p->offset = offset;
2856   p->bit = bit;
2857   p->live_length = 0;
2858   p->calls_crossed = 0;
2859   sometimes_max++;
2860   return sometimes_max;
2861 }
2862 
2863 /* Count lengths of all regs we are currently tracking,
2864    and find new registers no longer live.  */
2865 
2866 static void
finish_sometimes_live(regs_sometimes_live,sometimes_max)2867 finish_sometimes_live (regs_sometimes_live, sometimes_max)
2868      struct sometimes *regs_sometimes_live;
2869      int sometimes_max;
2870 {
2871   int i;
2872 
2873   for (i = 0; i < sometimes_max; i++)
2874     {
2875       register struct sometimes *p = &regs_sometimes_live[i];
2876       int regno;
2877 
2878       regno = p->offset * REGSET_ELT_BITS + p->bit;
2879 
2880       sched_reg_live_length[regno] += p->live_length;
2881       sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2882     }
2883 }
2884 
2885 /* Use modified list scheduling to rearrange insns in basic block
2886    B.  FILE, if nonzero, is where we dump interesting output about
2887    this pass.  */
2888 
2889 static void
schedule_block(b,file)2890 schedule_block (b, file)
2891      int b;
2892      FILE *file;
2893 {
2894   rtx insn, last;
2895   rtx last_note = 0;
2896   rtx *ready, link;
2897   int i, j, n_ready = 0, new_ready, n_insns = 0;
2898   int sched_n_insns = 0;
2899   int clock;
2900 #define NEED_NOTHING	0
2901 #define NEED_HEAD	1
2902 #define NEED_TAIL	2
2903   int new_needs;
2904 
2905   /* HEAD and TAIL delimit the region being scheduled.  */
2906   rtx head = basic_block_head[b];
2907   rtx tail = basic_block_end[b];
2908   /* PREV_HEAD and NEXT_TAIL are the boundaries of the insns
2909      being scheduled.  When the insns have been ordered,
2910      these insns delimit where the new insns are to be
2911      spliced back into the insn chain.  */
2912   rtx next_tail;
2913   rtx prev_head;
2914 
2915   /* Keep life information accurate.  */
2916   register struct sometimes *regs_sometimes_live;
2917   int sometimes_max;
2918 
2919   if (file)
2920     fprintf (file, ";;\t -- basic block number %d from %d to %d --\n",
2921 	     b, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
2922 
2923   i = max_reg_num ();
2924   reg_last_uses = (rtx *) alloca (i * sizeof (rtx));
2925   bzero (reg_last_uses, i * sizeof (rtx));
2926   reg_last_sets = (rtx *) alloca (i * sizeof (rtx));
2927   bzero (reg_last_sets, i * sizeof (rtx));
2928   clear_units ();
2929 
2930   /* Remove certain insns at the beginning from scheduling,
2931      by advancing HEAD.  */
2932 
2933   /* At the start of a function, before reload has run, don't delay getting
2934      parameters from hard registers into pseudo registers.  */
2935   if (reload_completed == 0 && b == 0)
2936     {
2937       while (head != tail
2938 	     && GET_CODE (head) == NOTE
2939 	     && NOTE_LINE_NUMBER (head) != NOTE_INSN_FUNCTION_BEG)
2940 	head = NEXT_INSN (head);
2941       while (head != tail
2942 	     && GET_CODE (head) == INSN
2943 	     && GET_CODE (PATTERN (head)) == SET)
2944 	{
2945 	  rtx src = SET_SRC (PATTERN (head));
2946 	  while (GET_CODE (src) == SUBREG
2947 		 || GET_CODE (src) == SIGN_EXTEND
2948 		 || GET_CODE (src) == ZERO_EXTEND
2949 		 || GET_CODE (src) == SIGN_EXTRACT
2950 		 || GET_CODE (src) == ZERO_EXTRACT)
2951 	    src = XEXP (src, 0);
2952 	  if (GET_CODE (src) != REG
2953 	      || REGNO (src) >= FIRST_PSEUDO_REGISTER)
2954 	    break;
2955 	  /* Keep this insn from ever being scheduled.  */
2956 	  INSN_REF_COUNT (head) = 1;
2957 	  head = NEXT_INSN (head);
2958 	}
2959     }
2960 
2961   /* Don't include any notes or labels at the beginning of the
2962      basic block, or notes at the ends of basic blocks.  */
2963   while (head != tail)
2964     {
2965       if (GET_CODE (head) == NOTE)
2966 	head = NEXT_INSN (head);
2967       else if (GET_CODE (tail) == NOTE)
2968 	tail = PREV_INSN (tail);
2969       else if (GET_CODE (head) == CODE_LABEL)
2970 	head = NEXT_INSN (head);
2971       else break;
2972     }
2973   /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
2974      to schedule this block.  */
2975   if (head == tail
2976       && (GET_CODE (head) == NOTE || GET_CODE (head) == CODE_LABEL))
2977     return;
2978 
2979 #if 0
2980   /* This short-cut doesn't work.  It does not count call insns crossed by
2981      registers in reg_sometimes_live.  It does not mark these registers as
2982      dead if they die in this block.  It does not mark these registers live
2983      (or create new reg_sometimes_live entries if necessary) if they are born
2984      in this block.
2985 
2986      The easy solution is to just always schedule a block.  This block only
2987      has one insn, so this won't slow down this pass by much.  */
2988 
2989   if (head == tail)
2990     return;
2991 #endif
2992 
2993   /* Now HEAD through TAIL are the insns actually to be rearranged;
2994      Let PREV_HEAD and NEXT_TAIL enclose them.  */
2995   prev_head = PREV_INSN (head);
2996   next_tail = NEXT_INSN (tail);
2997 
2998   /* Initialize basic block data structures.  */
2999   dead_notes = 0;
3000   pending_read_insns = 0;
3001   pending_read_mems = 0;
3002   pending_write_insns = 0;
3003   pending_write_mems = 0;
3004   pending_lists_length = 0;
3005   last_pending_memory_flush = 0;
3006   last_function_call = 0;
3007   last_scheduled_insn = 0;
3008 
3009   LOG_LINKS (sched_before_next_call) = 0;
3010 
3011   n_insns += sched_analyze (head, tail);
3012   if (n_insns == 0)
3013     {
3014       free_pending_lists ();
3015       return;
3016     }
3017 
3018   /* Allocate vector to hold insns to be rearranged (except those
3019      insns which are controlled by an insn with SCHED_GROUP_P set).
3020      All these insns are included between ORIG_HEAD and ORIG_TAIL,
3021      as those variables ultimately are set up.  */
3022   ready = (rtx *) alloca ((n_insns+1) * sizeof (rtx));
3023 
3024   /* TAIL is now the last of the insns to be rearranged.
3025      Put those insns into the READY vector.  */
3026   insn = tail;
3027 
3028   /* For all branches, calls, uses, and cc0 setters, force them to remain
3029      in order at the end of the block by adding dependencies and giving
3030      the last a high priority.  There may be notes present, and prev_head
3031      may also be a note.
3032 
3033      Branches must obviously remain at the end.  Calls should remain at the
3034      end since moving them results in worse register allocation.  Uses remain
3035      at the end to ensure proper register allocation.  cc0 setters remaim
3036      at the end because they can't be moved away from their cc0 user.  */
3037   last = 0;
3038   while (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
3039 	 || (GET_CODE (insn) == INSN
3040 	     && (GET_CODE (PATTERN (insn)) == USE
3041 #ifdef HAVE_cc0
3042 		 || sets_cc0_p (PATTERN (insn))
3043 #endif
3044 		 ))
3045 	 || GET_CODE (insn) == NOTE)
3046     {
3047       if (GET_CODE (insn) != NOTE)
3048 	{
3049 	  priority (insn);
3050 	  if (last == 0)
3051 	    {
3052 	      ready[n_ready++] = insn;
3053 	      INSN_PRIORITY (insn) = TAIL_PRIORITY - i;
3054 	      INSN_REF_COUNT (insn) = 0;
3055 	    }
3056 	  else if (! find_insn_list (insn, LOG_LINKS (last)))
3057 	    {
3058 	      add_dependence (last, insn, REG_DEP_ANTI);
3059 	      INSN_REF_COUNT (insn)++;
3060 	    }
3061 	  last = insn;
3062 
3063 	  /* Skip over insns that are part of a group.  */
3064 	  while (SCHED_GROUP_P (insn))
3065 	    {
3066 	      insn = prev_nonnote_insn (insn);
3067 	      priority (insn);
3068 	    }
3069 	}
3070 
3071       insn = PREV_INSN (insn);
3072       /* Don't overrun the bounds of the basic block.  */
3073       if (insn == prev_head)
3074 	break;
3075     }
3076 
3077   /* Assign priorities to instructions.  Also check whether they
3078      are in priority order already.  If so then I will be nonnegative.
3079      We use this shortcut only before reloading.  */
3080 #if 0
3081   i = reload_completed ? DONE_PRIORITY : MAX_PRIORITY;
3082 #endif
3083 
3084   for (; insn != prev_head; insn = PREV_INSN (insn))
3085     {
3086       if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3087 	{
3088 	  priority (insn);
3089 	  if (INSN_REF_COUNT (insn) == 0)
3090 	    {
3091 	      if (last == 0)
3092 		ready[n_ready++] = insn;
3093 	      else
3094 		{
3095 		  /* Make this dependent on the last of the instructions
3096 		     that must remain in order at the end of the block.  */
3097 		  add_dependence (last, insn, REG_DEP_ANTI);
3098 		  INSN_REF_COUNT (insn) = 1;
3099 		}
3100 	    }
3101 	  if (SCHED_GROUP_P (insn))
3102 	    {
3103 	      while (SCHED_GROUP_P (insn))
3104 		{
3105 		  insn = PREV_INSN (insn);
3106 		  while (GET_CODE (insn) == NOTE)
3107 		    insn = PREV_INSN (insn);
3108 		  priority (insn);
3109 		}
3110 	      continue;
3111 	    }
3112 #if 0
3113 	  if (i < 0)
3114 	    continue;
3115 	  if (INSN_PRIORITY (insn) < i)
3116 	    i = INSN_PRIORITY (insn);
3117 	  else if (INSN_PRIORITY (insn) > i)
3118 	    i = DONE_PRIORITY;
3119 #endif
3120 	}
3121     }
3122 
3123 #if 0
3124   /* This short-cut doesn't work.  It does not count call insns crossed by
3125      registers in reg_sometimes_live.  It does not mark these registers as
3126      dead if they die in this block.  It does not mark these registers live
3127      (or create new reg_sometimes_live entries if necessary) if they are born
3128      in this block.
3129 
3130      The easy solution is to just always schedule a block.  These blocks tend
3131      to be very short, so this doesn't slow down this pass by much.  */
3132 
3133   /* If existing order is good, don't bother to reorder.  */
3134   if (i != DONE_PRIORITY)
3135     {
3136       if (file)
3137 	fprintf (file, ";; already scheduled\n");
3138 
3139       if (reload_completed == 0)
3140 	{
3141 	  for (i = 0; i < sometimes_max; i++)
3142 	    regs_sometimes_live[i].live_length += n_insns;
3143 
3144 	  finish_sometimes_live (regs_sometimes_live, sometimes_max);
3145 	}
3146       free_pending_lists ();
3147       return;
3148     }
3149 #endif
3150 
3151   /* Scan all the insns to be scheduled, removing NOTE insns
3152      and register death notes.
3153      Line number NOTE insns end up in NOTE_LIST.
3154      Register death notes end up in DEAD_NOTES.
3155 
3156      Recreate the register life information for the end of this basic
3157      block.  */
3158 
3159   if (reload_completed == 0)
3160     {
3161       bcopy (basic_block_live_at_start[b], bb_live_regs, regset_bytes);
3162       bzero (bb_dead_regs, regset_bytes);
3163 
3164       if (b == 0)
3165 	{
3166 	  /* This is the first block in the function.  There may be insns
3167 	     before head that we can't schedule.   We still need to examine
3168 	     them though for accurate register lifetime analysis.  */
3169 
3170 	  /* We don't want to remove any REG_DEAD notes as the code below
3171 	     does.  */
3172 
3173 	  for (insn = basic_block_head[b]; insn != head;
3174 	       insn = NEXT_INSN (insn))
3175 	    if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3176 	      {
3177 		/* See if the register gets born here.  */
3178 		/* We must check for registers being born before we check for
3179 		   registers dying.  It is possible for a register to be born
3180 		   and die in the same insn, e.g. reading from a volatile
3181 		   memory location into an otherwise unused register.  Such
3182 		   a register must be marked as dead after this insn.  */
3183 		if (GET_CODE (PATTERN (insn)) == SET
3184 		    || GET_CODE (PATTERN (insn)) == CLOBBER)
3185 		  sched_note_set (b, PATTERN (insn), 0);
3186 		else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3187 		  {
3188 		    int j;
3189 		    for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3190 		      if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3191 			  || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3192 			sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
3193 
3194 		    /* ??? This code is obsolete and should be deleted.  It
3195 		       is harmless though, so we will leave it in for now.  */
3196 		    for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3197 		      if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
3198 			sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
3199 		  }
3200 
3201 		for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
3202 		  {
3203 		    if ((REG_NOTE_KIND (link) == REG_DEAD
3204 			 || REG_NOTE_KIND (link) == REG_UNUSED)
3205 			/* Verify that the REG_NOTE has a legal value.  */
3206 			&& GET_CODE (XEXP (link, 0)) == REG)
3207 		      {
3208 			register int regno = REGNO (XEXP (link, 0));
3209 			register int offset = regno / REGSET_ELT_BITS;
3210 			register REGSET_ELT_TYPE bit
3211 			  = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
3212 
3213 			if (regno < FIRST_PSEUDO_REGISTER)
3214 			  {
3215 			    int j = HARD_REGNO_NREGS (regno,
3216 						      GET_MODE (XEXP (link, 0)));
3217 			    while (--j >= 0)
3218 			      {
3219 				offset = (regno + j) / REGSET_ELT_BITS;
3220 				bit = ((REGSET_ELT_TYPE) 1
3221 				       << ((regno + j) % REGSET_ELT_BITS));
3222 
3223 				bb_live_regs[offset] &= ~bit;
3224 				bb_dead_regs[offset] |= bit;
3225 			      }
3226 			  }
3227 			else
3228 			  {
3229 			    bb_live_regs[offset] &= ~bit;
3230 			    bb_dead_regs[offset] |= bit;
3231 			  }
3232 		      }
3233 		  }
3234 	      }
3235 	}
3236     }
3237 
3238   /* If debugging information is being produced, keep track of the line
3239      number notes for each insn.  */
3240   if (write_symbols != NO_DEBUG)
3241     {
3242       /* We must use the true line number for the first insn in the block
3243 	 that was computed and saved at the start of this pass.  We can't
3244 	 use the current line number, because scheduling of the previous
3245 	 block may have changed the current line number.  */
3246       rtx line = line_note_head[b];
3247 
3248       for (insn = basic_block_head[b];
3249 	   insn != next_tail;
3250 	   insn = NEXT_INSN (insn))
3251 	if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3252 	  line = insn;
3253 	else
3254 	  LINE_NOTE (insn) = line;
3255     }
3256 
3257   for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3258     {
3259       rtx prev, next, link;
3260 
3261       /* Farm out notes.  This is needed to keep the debugger from
3262 	 getting completely deranged.  */
3263       if (GET_CODE (insn) == NOTE)
3264 	{
3265 	  prev = insn;
3266 	  insn = unlink_notes (insn, next_tail);
3267 	  if (prev == tail)
3268 	    abort ();
3269 	  if (prev == head)
3270 	    abort ();
3271 	  if (insn == next_tail)
3272 	    abort ();
3273 	}
3274 
3275       if (reload_completed == 0
3276 	  && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3277 	{
3278 	  /* See if the register gets born here.  */
3279 	  /* We must check for registers being born before we check for
3280 	     registers dying.  It is possible for a register to be born and
3281 	     die in the same insn, e.g. reading from a volatile memory
3282 	     location into an otherwise unused register.  Such a register
3283 	     must be marked as dead after this insn.  */
3284 	  if (GET_CODE (PATTERN (insn)) == SET
3285 	      || GET_CODE (PATTERN (insn)) == CLOBBER)
3286 	    sched_note_set (b, PATTERN (insn), 0);
3287 	  else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3288 	    {
3289 	      int j;
3290 	      for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3291 		if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3292 		    || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3293 		  sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
3294 
3295 	      /* ??? This code is obsolete and should be deleted.  It
3296 		 is harmless though, so we will leave it in for now.  */
3297 	      for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3298 		if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
3299 		  sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
3300 	    }
3301 
3302 	  /* Need to know what registers this insn kills.  */
3303 	  for (prev = 0, link = REG_NOTES (insn); link; link = next)
3304 	    {
3305 	      int regno;
3306 
3307 	      next = XEXP (link, 1);
3308 	      if ((REG_NOTE_KIND (link) == REG_DEAD
3309 		   || REG_NOTE_KIND (link) == REG_UNUSED)
3310 		  /* Verify that the REG_NOTE has a legal value.  */
3311 		  && GET_CODE (XEXP (link, 0)) == REG)
3312 		{
3313 		  register int regno = REGNO (XEXP (link, 0));
3314 		  register int offset = regno / REGSET_ELT_BITS;
3315 		  register REGSET_ELT_TYPE bit
3316 		    = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
3317 
3318 		  /* Only unlink REG_DEAD notes; leave REG_UNUSED notes
3319 		     alone.  */
3320 		  if (REG_NOTE_KIND (link) == REG_DEAD)
3321 		    {
3322 		      if (prev)
3323 			XEXP (prev, 1) = next;
3324 		      else
3325 			REG_NOTES (insn) = next;
3326 		      XEXP (link, 1) = dead_notes;
3327 		      dead_notes = link;
3328 		    }
3329 		  else
3330 		    prev = link;
3331 
3332 		  if (regno < FIRST_PSEUDO_REGISTER)
3333 		    {
3334 		      int j = HARD_REGNO_NREGS (regno,
3335 						GET_MODE (XEXP (link, 0)));
3336 		      while (--j >= 0)
3337 			{
3338 			  offset = (regno + j) / REGSET_ELT_BITS;
3339 			  bit = ((REGSET_ELT_TYPE) 1
3340 				 << ((regno + j) % REGSET_ELT_BITS));
3341 
3342 			  bb_live_regs[offset] &= ~bit;
3343 			  bb_dead_regs[offset] |= bit;
3344 			}
3345 		    }
3346 		  else
3347 		    {
3348 		      bb_live_regs[offset] &= ~bit;
3349 		      bb_dead_regs[offset] |= bit;
3350 		    }
3351 		}
3352 	      else
3353 		prev = link;
3354 	    }
3355 	}
3356     }
3357 
3358   if (reload_completed == 0)
3359     {
3360       /* Keep track of register lives.  */
3361       old_live_regs = (regset) alloca (regset_bytes);
3362       regs_sometimes_live
3363 	= (struct sometimes *) alloca (max_regno * sizeof (struct sometimes));
3364       sometimes_max = 0;
3365 
3366       /* Start with registers live at end.  */
3367       for (j = 0; j < regset_size; j++)
3368 	{
3369 	  REGSET_ELT_TYPE live = bb_live_regs[j];
3370 	  old_live_regs[j] = live;
3371 	  if (live)
3372 	    {
3373 	      register REGSET_ELT_TYPE bit;
3374 	      for (bit = 0; bit < REGSET_ELT_BITS; bit++)
3375 		if (live & ((REGSET_ELT_TYPE) 1 << bit))
3376 		  sometimes_max = new_sometimes_live (regs_sometimes_live, j,
3377 						      bit, sometimes_max);
3378 	    }
3379 	}
3380     }
3381 
3382   SCHED_SORT (ready, n_ready, 1);
3383 
3384   if (file)
3385     {
3386       fprintf (file, ";; ready list initially:\n;; ");
3387       for (i = 0; i < n_ready; i++)
3388 	fprintf (file, "%d ", INSN_UID (ready[i]));
3389       fprintf (file, "\n\n");
3390 
3391       for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3392 	if (INSN_PRIORITY (insn) > 0)
3393 	  fprintf (file, ";; insn[%4d]: priority = %4d, ref_count = %4d\n",
3394 		   INSN_UID (insn), INSN_PRIORITY (insn),
3395 		   INSN_REF_COUNT (insn));
3396     }
3397 
3398   /* Now HEAD and TAIL are going to become disconnected
3399      entirely from the insn chain.  */
3400   tail = 0;
3401 
3402   /* Q_SIZE will always be zero here.  */
3403   q_ptr = 0; clock = 0;
3404   bzero (insn_queue, sizeof (insn_queue));
3405 
3406   /* Now, perform list scheduling.  */
3407 
3408   /* Where we start inserting insns is after TAIL.  */
3409   last = next_tail;
3410 
3411   new_needs = (NEXT_INSN (prev_head) == basic_block_head[b]
3412 	       ? NEED_HEAD : NEED_NOTHING);
3413   if (PREV_INSN (next_tail) == basic_block_end[b])
3414     new_needs |= NEED_TAIL;
3415 
3416   new_ready = n_ready;
3417   while (sched_n_insns < n_insns)
3418     {
3419       q_ptr = NEXT_Q (q_ptr); clock++;
3420 
3421       /* Add all pending insns that can be scheduled without stalls to the
3422 	 ready list.  */
3423       for (insn = insn_queue[q_ptr]; insn; insn = NEXT_INSN (insn))
3424 	{
3425 	  if (file)
3426 	    fprintf (file, ";; launching %d before %d with no stalls at T-%d\n",
3427 		     INSN_UID (insn), INSN_UID (last), clock);
3428 	  ready[new_ready++] = insn;
3429 	  q_size -= 1;
3430 	}
3431       insn_queue[q_ptr] = 0;
3432 
3433       /* If there are no ready insns, stall until one is ready and add all
3434 	 of the pending insns at that point to the ready list.  */
3435       if (new_ready == 0)
3436 	{
3437 	  register int stalls;
3438 
3439 	  for (stalls = 1; stalls < INSN_QUEUE_SIZE; stalls++)
3440 	    if (insn = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)])
3441 	      {
3442 		for (; insn; insn = NEXT_INSN (insn))
3443 		  {
3444 		    if (file)
3445 		      fprintf (file, ";; launching %d before %d with %d stalls at T-%d\n",
3446 			       INSN_UID (insn), INSN_UID (last), stalls, clock);
3447 		    ready[new_ready++] = insn;
3448 		    q_size -= 1;
3449 		  }
3450 		insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;
3451 		break;
3452 	      }
3453 
3454 	  q_ptr = NEXT_Q_AFTER (q_ptr, stalls); clock += stalls;
3455 	}
3456 
3457       /* There should be some instructions waiting to fire.  */
3458       if (new_ready == 0)
3459 	abort ();
3460 
3461       if (file)
3462 	{
3463 	  fprintf (file, ";; ready list at T-%d:", clock);
3464 	  for (i = 0; i < new_ready; i++)
3465 	    fprintf (file, " %d (%x)",
3466 		     INSN_UID (ready[i]), INSN_PRIORITY (ready[i]));
3467 	}
3468 
3469       /* Sort the ready list and choose the best insn to schedule.  Select
3470 	 which insn should issue in this cycle and queue those that are
3471 	 blocked by function unit hazards.
3472 
3473 	 N_READY holds the number of items that were scheduled the last time,
3474 	 minus the one instruction scheduled on the last loop iteration; it
3475 	 is not modified for any other reason in this loop.  */
3476 
3477       SCHED_SORT (ready, new_ready, n_ready);
3478       if (MAX_BLOCKAGE > 1)
3479 	{
3480 	  new_ready = schedule_select (ready, new_ready, clock, file);
3481 	  if (new_ready == 0)
3482 	    {
3483 	      if (file)
3484 		fprintf (file, "\n");
3485 	      /* We must set n_ready here, to ensure that sorting always
3486 		 occurs when we come back to the SCHED_SORT line above.  */
3487 	      n_ready = 0;
3488 	      continue;
3489 	    }
3490 	}
3491       n_ready = new_ready;
3492       last_scheduled_insn = insn = ready[0];
3493 
3494       /* The first insn scheduled becomes the new tail.  */
3495       if (tail == 0)
3496 	tail = insn;
3497 
3498       if (file)
3499 	{
3500 	  fprintf (file, ", now");
3501 	  for (i = 0; i < n_ready; i++)
3502 	    fprintf (file, " %d", INSN_UID (ready[i]));
3503 	  fprintf (file, "\n");
3504 	}
3505 
3506       if (DONE_PRIORITY_P (insn))
3507 	abort ();
3508 
3509       if (reload_completed == 0)
3510 	{
3511 	  /* Process this insn, and each insn linked to this one which must
3512 	     be immediately output after this insn.  */
3513 	  do
3514 	    {
3515 	      /* First we kill registers set by this insn, and then we
3516 		 make registers used by this insn live.  This is the opposite
3517 		 order used above because we are traversing the instructions
3518 		 backwards.  */
3519 
3520 	      /* Strictly speaking, we should scan REG_UNUSED notes and make
3521 		 every register mentioned there live, however, we will just
3522 		 kill them again immediately below, so there doesn't seem to
3523 		 be any reason why we bother to do this.  */
3524 
3525 	      /* See if this is the last notice we must take of a register.  */
3526 	      if (GET_CODE (PATTERN (insn)) == SET
3527 		  || GET_CODE (PATTERN (insn)) == CLOBBER)
3528 		sched_note_set (b, PATTERN (insn), 1);
3529 	      else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3530 		{
3531 		  int j;
3532 		  for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3533 		    if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3534 			|| GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3535 		      sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 1);
3536 		}
3537 
3538 	      /* This code keeps life analysis information up to date.  */
3539 	      if (GET_CODE (insn) == CALL_INSN)
3540 		{
3541 		  register struct sometimes *p;
3542 
3543 		  /* A call kills all call used and global registers, except
3544 		     for those mentioned in the call pattern which will be
3545 		     made live again later.  */
3546 		  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3547 		    if (call_used_regs[i] || global_regs[i])
3548 		      {
3549 			register int offset = i / REGSET_ELT_BITS;
3550 			register REGSET_ELT_TYPE bit
3551 			  = (REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS);
3552 
3553 			bb_live_regs[offset] &= ~bit;
3554 			bb_dead_regs[offset] |= bit;
3555 		      }
3556 
3557 		  /* Regs live at the time of a call instruction must not
3558 		     go in a register clobbered by calls.  Record this for
3559 		     all regs now live.  Note that insns which are born or
3560 		     die in a call do not cross a call, so this must be done
3561 		     after the killings (above) and before the births
3562 		     (below).  */
3563 		  p = regs_sometimes_live;
3564 		  for (i = 0; i < sometimes_max; i++, p++)
3565 		    if (bb_live_regs[p->offset]
3566 			& ((REGSET_ELT_TYPE) 1 << p->bit))
3567 		      p->calls_crossed += 1;
3568 		}
3569 
3570 	      /* Make every register used live, and add REG_DEAD notes for
3571 		 registers which were not live before we started.  */
3572 	      attach_deaths_insn (insn);
3573 
3574 	      /* Find registers now made live by that instruction.  */
3575 	      for (i = 0; i < regset_size; i++)
3576 		{
3577 		  REGSET_ELT_TYPE diff = bb_live_regs[i] & ~old_live_regs[i];
3578 		  if (diff)
3579 		    {
3580 		      register int bit;
3581 		      old_live_regs[i] |= diff;
3582 		      for (bit = 0; bit < REGSET_ELT_BITS; bit++)
3583 			if (diff & ((REGSET_ELT_TYPE) 1 << bit))
3584 			  sometimes_max
3585 			    = new_sometimes_live (regs_sometimes_live, i, bit,
3586 						  sometimes_max);
3587 		    }
3588 		}
3589 
3590 	      /* Count lengths of all regs we are worrying about now,
3591 		 and handle registers no longer live.  */
3592 
3593 	      for (i = 0; i < sometimes_max; i++)
3594 		{
3595 		  register struct sometimes *p = &regs_sometimes_live[i];
3596 		  int regno = p->offset*REGSET_ELT_BITS + p->bit;
3597 
3598 		  p->live_length += 1;
3599 
3600 		  if ((bb_live_regs[p->offset]
3601 		       & ((REGSET_ELT_TYPE) 1 << p->bit)) == 0)
3602 		    {
3603 		      /* This is the end of one of this register's lifetime
3604 			 segments.  Save the lifetime info collected so far,
3605 			 and clear its bit in the old_live_regs entry.  */
3606 		      sched_reg_live_length[regno] += p->live_length;
3607 		      sched_reg_n_calls_crossed[regno] += p->calls_crossed;
3608 		      old_live_regs[p->offset]
3609 			&= ~((REGSET_ELT_TYPE) 1 << p->bit);
3610 
3611 		      /* Delete the reg_sometimes_live entry for this reg by
3612 			 copying the last entry over top of it.  */
3613 		      *p = regs_sometimes_live[--sometimes_max];
3614 		      /* ...and decrement i so that this newly copied entry
3615 			 will be processed.  */
3616 		      i--;
3617 		    }
3618 		}
3619 
3620 	      link = insn;
3621 	      insn = PREV_INSN (insn);
3622 	    }
3623 	  while (SCHED_GROUP_P (link));
3624 
3625 	  /* Set INSN back to the insn we are scheduling now.  */
3626 	  insn = ready[0];
3627 	}
3628 
3629       /* Schedule INSN.  Remove it from the ready list.  */
3630       ready += 1;
3631       n_ready -= 1;
3632 
3633       sched_n_insns += 1;
3634       NEXT_INSN (insn) = last;
3635       PREV_INSN (last) = insn;
3636       last = insn;
3637 
3638       /* Everything that precedes INSN now either becomes "ready", if
3639 	 it can execute immediately before INSN, or "pending", if
3640 	 there must be a delay.  Give INSN high enough priority that
3641 	 at least one (maybe more) reg-killing insns can be launched
3642 	 ahead of all others.  Mark INSN as scheduled by changing its
3643 	 priority to -1.  */
3644       INSN_PRIORITY (insn) = LAUNCH_PRIORITY;
3645       new_ready = schedule_insn (insn, ready, n_ready, clock);
3646       INSN_PRIORITY (insn) = DONE_PRIORITY;
3647 
3648       /* Schedule all prior insns that must not be moved.  */
3649       if (SCHED_GROUP_P (insn))
3650 	{
3651 	  /* Disable these insns from being launched.  */
3652 	  link = insn;
3653 	  while (SCHED_GROUP_P (link))
3654 	    {
3655 	      /* Disable these insns from being launched by anybody.  */
3656 	      link = PREV_INSN (link);
3657 	      INSN_REF_COUNT (link) = 0;
3658 	    }
3659 
3660 	  /* None of these insns can move forward into delay slots.  */
3661 	  while (SCHED_GROUP_P (insn))
3662 	    {
3663 	      insn = PREV_INSN (insn);
3664 	      new_ready = schedule_insn (insn, ready, new_ready, clock);
3665 	      INSN_PRIORITY (insn) = DONE_PRIORITY;
3666 
3667 	      sched_n_insns += 1;
3668 	      NEXT_INSN (insn) = last;
3669 	      PREV_INSN (last) = insn;
3670 	      last = insn;
3671 	    }
3672 	}
3673     }
3674   if (q_size != 0)
3675     abort ();
3676 
3677   if (reload_completed == 0)
3678     finish_sometimes_live (regs_sometimes_live, sometimes_max);
3679 
3680   /* HEAD is now the first insn in the chain of insns that
3681      been scheduled by the loop above.
3682      TAIL is the last of those insns.  */
3683   head = insn;
3684 
3685   /* NOTE_LIST is the end of a chain of notes previously found
3686      among the insns.  Insert them at the beginning of the insns.  */
3687   if (note_list != 0)
3688     {
3689       rtx note_head = note_list;
3690       while (PREV_INSN (note_head))
3691 	note_head = PREV_INSN (note_head);
3692 
3693       PREV_INSN (head) = note_list;
3694       NEXT_INSN (note_list) = head;
3695       head = note_head;
3696     }
3697 
3698   /* There should be no REG_DEAD notes leftover at the end.
3699      In practice, this can occur as the result of bugs in flow, combine.c,
3700      and/or sched.c.  The values of the REG_DEAD notes remaining are
3701      meaningless, because dead_notes is just used as a free list.  */
3702 #if 1
3703   if (dead_notes != 0)
3704     abort ();
3705 #endif
3706 
3707   if (new_needs & NEED_HEAD)
3708     basic_block_head[b] = head;
3709   PREV_INSN (head) = prev_head;
3710   NEXT_INSN (prev_head) = head;
3711 
3712   if (new_needs & NEED_TAIL)
3713     basic_block_end[b] = tail;
3714   NEXT_INSN (tail) = next_tail;
3715   PREV_INSN (next_tail) = tail;
3716 
3717   /* Restore the line-number notes of each insn.  */
3718   if (write_symbols != NO_DEBUG)
3719     {
3720       rtx line, note, prev, new;
3721       int notes = 0;
3722 
3723       head = basic_block_head[b];
3724       next_tail = NEXT_INSN (basic_block_end[b]);
3725 
3726       /* Determine the current line-number.  We want to know the current
3727 	 line number of the first insn of the block here, in case it is
3728 	 different from the true line number that was saved earlier.  If
3729 	 different, then we need a line number note before the first insn
3730 	 of this block.  If it happens to be the same, then we don't want to
3731 	 emit another line number note here.  */
3732       for (line = head; line; line = PREV_INSN (line))
3733 	if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
3734 	  break;
3735 
3736       /* Walk the insns keeping track of the current line-number and inserting
3737 	 the line-number notes as needed.  */
3738       for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3739 	if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3740 	  line = insn;
3741 	else if (! (GET_CODE (insn) == NOTE
3742 		    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
3743 		 && (note = LINE_NOTE (insn)) != 0
3744 		 && note != line
3745 		 && (line == 0
3746 		     || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
3747 		     || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)))
3748 	  {
3749 	    line = note;
3750 	    prev = PREV_INSN (insn);
3751 	    if (LINE_NOTE (note))
3752 	      {
3753 		/* Re-use the original line-number note. */
3754 		LINE_NOTE (note) = 0;
3755 		PREV_INSN (note) = prev;
3756 		NEXT_INSN (prev) = note;
3757 		PREV_INSN (insn) = note;
3758 		NEXT_INSN (note) = insn;
3759 	      }
3760 	    else
3761 	      {
3762 		notes++;
3763 		new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
3764 		NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
3765 	      }
3766 	  }
3767       if (file && notes)
3768 	fprintf (file, ";; added %d line-number notes\n", notes);
3769     }
3770 
3771   if (file)
3772     {
3773       fprintf (file, ";; total time = %d\n;; new basic block head = %d\n;; new basic block end = %d\n\n",
3774 	       clock, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
3775     }
3776 
3777   /* Yow! We're done!  */
3778   free_pending_lists ();
3779 
3780   return;
3781 }
3782 
3783 /* Subroutine of split_hard_reg_notes.  Searches X for any reference to
3784    REGNO, returning the rtx of the reference found if any.  Otherwise,
3785    returns 0.  */
3786 
3787 rtx
regno_use_in(regno,x)3788 regno_use_in (regno, x)
3789      int regno;
3790      rtx x;
3791 {
3792   register char *fmt;
3793   int i, j;
3794   rtx tem;
3795 
3796   if (GET_CODE (x) == REG && REGNO (x) == regno)
3797     return x;
3798 
3799   fmt = GET_RTX_FORMAT (GET_CODE (x));
3800   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3801     {
3802       if (fmt[i] == 'e')
3803 	{
3804 	  if (tem = regno_use_in (regno, XEXP (x, i)))
3805 	    return tem;
3806 	}
3807       else if (fmt[i] == 'E')
3808 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3809 	  if (tem = regno_use_in (regno , XVECEXP (x, i, j)))
3810 	    return tem;
3811     }
3812 
3813   return 0;
3814 }
3815 
3816 /* Subroutine of update_flow_info.  Determines whether any new REG_NOTEs are
3817    needed for the hard register mentioned in the note.  This can happen
3818    if the reference to the hard register in the original insn was split into
3819    several smaller hard register references in the split insns.  */
3820 
3821 static void
split_hard_reg_notes(note,first,last,orig_insn)3822 split_hard_reg_notes (note, first, last, orig_insn)
3823      rtx note, first, last, orig_insn;
3824 {
3825   rtx reg, temp, link;
3826   int n_regs, i, new_reg;
3827   rtx insn;
3828 
3829   /* Assume that this is a REG_DEAD note.  */
3830   if (REG_NOTE_KIND (note) != REG_DEAD)
3831     abort ();
3832 
3833   reg = XEXP (note, 0);
3834 
3835   n_regs = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
3836 
3837   for (i = 0; i < n_regs; i++)
3838     {
3839       new_reg = REGNO (reg) + i;
3840 
3841       /* Check for references to new_reg in the split insns.  */
3842       for (insn = last; ; insn = PREV_INSN (insn))
3843 	{
3844 	  if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3845 	      && (temp = regno_use_in (new_reg, PATTERN (insn))))
3846 	    {
3847 	      /* Create a new reg dead note here.  */
3848 	      link = rtx_alloc (EXPR_LIST);
3849 	      PUT_REG_NOTE_KIND (link, REG_DEAD);
3850 	      XEXP (link, 0) = temp;
3851 	      XEXP (link, 1) = REG_NOTES (insn);
3852 	      REG_NOTES (insn) = link;
3853 
3854 	      /* If killed multiple registers here, then add in the excess.  */
3855 	      i += HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) - 1;
3856 
3857 	      break;
3858 	    }
3859 	  /* It isn't mentioned anywhere, so no new reg note is needed for
3860 	     this register.  */
3861 	  if (insn == first)
3862 	    break;
3863 	}
3864     }
3865 }
3866 
3867 /* Subroutine of update_flow_info.  Determines whether a SET or CLOBBER in an
3868    insn created by splitting needs a REG_DEAD or REG_UNUSED note added.  */
3869 
3870 static void
new_insn_dead_notes(pat,insn,last,orig_insn)3871 new_insn_dead_notes (pat, insn, last, orig_insn)
3872      rtx pat, insn, last, orig_insn;
3873 {
3874   rtx dest, tem, set;
3875 
3876   /* PAT is either a CLOBBER or a SET here.  */
3877   dest = XEXP (pat, 0);
3878 
3879   while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
3880 	 || GET_CODE (dest) == STRICT_LOW_PART
3881 	 || GET_CODE (dest) == SIGN_EXTRACT)
3882     dest = XEXP (dest, 0);
3883 
3884   if (GET_CODE (dest) == REG)
3885     {
3886       for (tem = last; tem != insn; tem = PREV_INSN (tem))
3887 	{
3888 	  if (GET_RTX_CLASS (GET_CODE (tem)) == 'i'
3889 	      && reg_overlap_mentioned_p (dest, PATTERN (tem))
3890 	      && (set = single_set (tem)))
3891 	    {
3892 	      rtx tem_dest = SET_DEST (set);
3893 
3894 	      while (GET_CODE (tem_dest) == ZERO_EXTRACT
3895 		     || GET_CODE (tem_dest) == SUBREG
3896 		     || GET_CODE (tem_dest) == STRICT_LOW_PART
3897 		     || GET_CODE (tem_dest) == SIGN_EXTRACT)
3898 		tem_dest = XEXP (tem_dest, 0);
3899 
3900 	      if (tem_dest != dest)
3901 		{
3902 		  /* Use the same scheme as combine.c, don't put both REG_DEAD
3903 		     and REG_UNUSED notes on the same insn.  */
3904 		  if (! find_regno_note (tem, REG_UNUSED, REGNO (dest))
3905 		      && ! find_regno_note (tem, REG_DEAD, REGNO (dest)))
3906 		    {
3907 		      rtx note = rtx_alloc (EXPR_LIST);
3908 		      PUT_REG_NOTE_KIND (note, REG_DEAD);
3909 		      XEXP (note, 0) = dest;
3910 		      XEXP (note, 1) = REG_NOTES (tem);
3911 		      REG_NOTES (tem) = note;
3912 		    }
3913 		  /* The reg only dies in one insn, the last one that uses
3914 		     it.  */
3915 		  break;
3916 		}
3917 	      else if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
3918 		/* We found an instruction that both uses the register,
3919 		   and sets it, so no new REG_NOTE is needed for this set.  */
3920 		break;
3921 	    }
3922 	}
3923       /* If this is a set, it must die somewhere, unless it is the dest of
3924 	 the original insn, and hence is live after the original insn.  Abort
3925 	 if it isn't supposed to be live after the original insn.
3926 
3927 	 If this is a clobber, then just add a REG_UNUSED note.  */
3928       if (tem == insn)
3929 	{
3930 	  int live_after_orig_insn = 0;
3931 	  rtx pattern = PATTERN (orig_insn);
3932 	  int i;
3933 
3934 	  if (GET_CODE (pat) == CLOBBER)
3935 	    {
3936 	      rtx note = rtx_alloc (EXPR_LIST);
3937 	      PUT_REG_NOTE_KIND (note, REG_UNUSED);
3938 	      XEXP (note, 0) = dest;
3939 	      XEXP (note, 1) = REG_NOTES (insn);
3940 	      REG_NOTES (insn) = note;
3941 	      return;
3942 	    }
3943 
3944 	  /* The original insn could have multiple sets, so search the
3945 	     insn for all sets.  */
3946 	  if (GET_CODE (pattern) == SET)
3947 	    {
3948 	      if (reg_overlap_mentioned_p (dest, SET_DEST (pattern)))
3949 		live_after_orig_insn = 1;
3950 	    }
3951 	  else if (GET_CODE (pattern) == PARALLEL)
3952 	    {
3953 	      for (i = 0; i < XVECLEN (pattern, 0); i++)
3954 		if (GET_CODE (XVECEXP (pattern, 0, i)) == SET
3955 		    && reg_overlap_mentioned_p (dest,
3956 						SET_DEST (XVECEXP (pattern,
3957 								   0, i))))
3958 		  live_after_orig_insn = 1;
3959 	    }
3960 
3961 	  if (! live_after_orig_insn)
3962 	    abort ();
3963 	}
3964     }
3965 }
3966 
3967 /* Subroutine of update_flow_info.  Update the value of reg_n_sets for all
3968    registers modified by X.  INC is -1 if the containing insn is being deleted,
3969    and is 1 if the containing insn is a newly generated insn.  */
3970 
3971 static void
update_n_sets(x,inc)3972 update_n_sets (x, inc)
3973      rtx x;
3974      int inc;
3975 {
3976   rtx dest = SET_DEST (x);
3977 
3978   while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
3979 	 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
3980     dest = SUBREG_REG (dest);
3981 
3982   if (GET_CODE (dest) == REG)
3983     {
3984       int regno = REGNO (dest);
3985 
3986       if (regno < FIRST_PSEUDO_REGISTER)
3987 	{
3988 	  register int i;
3989 	  int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (dest));
3990 
3991 	  for (i = regno; i < endregno; i++)
3992 	    reg_n_sets[i] += inc;
3993 	}
3994       else
3995 	reg_n_sets[regno] += inc;
3996     }
3997 }
3998 
3999 /* Updates all flow-analysis related quantities (including REG_NOTES) for
4000    the insns from FIRST to LAST inclusive that were created by splitting
4001    ORIG_INSN.  NOTES are the original REG_NOTES.  */
4002 
4003 static void
update_flow_info(notes,first,last,orig_insn)4004 update_flow_info (notes, first, last, orig_insn)
4005      rtx notes;
4006      rtx first, last;
4007      rtx orig_insn;
4008 {
4009   rtx insn, note;
4010   rtx next;
4011   rtx orig_dest, temp;
4012   rtx set;
4013 
4014   /* Get and save the destination set by the original insn.  */
4015 
4016   orig_dest = single_set (orig_insn);
4017   if (orig_dest)
4018     orig_dest = SET_DEST (orig_dest);
4019 
4020   /* Move REG_NOTES from the original insn to where they now belong.  */
4021 
4022   for (note = notes; note; note = next)
4023     {
4024       next = XEXP (note, 1);
4025       switch (REG_NOTE_KIND (note))
4026 	{
4027 	case REG_DEAD:
4028 	case REG_UNUSED:
4029 	  /* Move these notes from the original insn to the last new insn where
4030 	     the register is now set.  */
4031 
4032 	  for (insn = last; ; insn = PREV_INSN (insn))
4033 	    {
4034 	      if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4035 		  && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
4036 		{
4037 		  /* If this note refers to a multiple word hard register, it
4038 		     may have been split into several smaller hard register
4039 		     references, so handle it specially.  */
4040 		  temp = XEXP (note, 0);
4041 		  if (REG_NOTE_KIND (note) == REG_DEAD
4042 		      && GET_CODE (temp) == REG
4043 		      && REGNO (temp) < FIRST_PSEUDO_REGISTER
4044 		      && HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) > 1)
4045 		    split_hard_reg_notes (note, first, last, orig_insn);
4046 		  else
4047 		    {
4048 		      XEXP (note, 1) = REG_NOTES (insn);
4049 		      REG_NOTES (insn) = note;
4050 		    }
4051 
4052 		  /* Sometimes need to convert REG_UNUSED notes to REG_DEAD
4053 		     notes.  */
4054 		  /* ??? This won't handle multiple word registers correctly,
4055 		     but should be good enough for now.  */
4056 		  if (REG_NOTE_KIND (note) == REG_UNUSED
4057 		      && ! dead_or_set_p (insn, XEXP (note, 0)))
4058 		    PUT_REG_NOTE_KIND (note, REG_DEAD);
4059 
4060 		  /* The reg only dies in one insn, the last one that uses
4061 		     it.  */
4062 		  break;
4063 		}
4064 	      /* It must die somewhere, fail it we couldn't find where it died.
4065 
4066 		 If this is a REG_UNUSED note, then it must be a temporary
4067 		 register that was not needed by this instantiation of the
4068 		 pattern, so we can safely ignore it.  */
4069 	      if (insn == first)
4070 		{
4071 		  if (REG_NOTE_KIND (note) != REG_UNUSED)
4072 		    abort ();
4073 
4074 		  break;
4075 		}
4076 	    }
4077 	  break;
4078 
4079 	case REG_WAS_0:
4080 	  /* This note applies to the dest of the original insn.  Find the
4081 	     first new insn that now has the same dest, and move the note
4082 	     there.  */
4083 
4084 	  if (! orig_dest)
4085 	    abort ();
4086 
4087 	  for (insn = first; ; insn = NEXT_INSN (insn))
4088 	    {
4089 	      if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4090 		  && (temp = single_set (insn))
4091 		  && rtx_equal_p (SET_DEST (temp), orig_dest))
4092 		{
4093 		  XEXP (note, 1) = REG_NOTES (insn);
4094 		  REG_NOTES (insn) = note;
4095 		  /* The reg is only zero before one insn, the first that
4096 		     uses it.  */
4097 		  break;
4098 		}
4099 	      /* It must be set somewhere, fail if we couldn't find where it
4100 		 was set.  */
4101 	      if (insn == last)
4102 		abort ();
4103 	    }
4104 	  break;
4105 
4106 	case REG_EQUAL:
4107 	case REG_EQUIV:
4108 	  /* A REG_EQUIV or REG_EQUAL note on an insn with more than one
4109 	     set is meaningless.  Just drop the note.  */
4110 	  if (! orig_dest)
4111 	    break;
4112 
4113 	case REG_NO_CONFLICT:
4114 	  /* These notes apply to the dest of the original insn.  Find the last
4115 	     new insn that now has the same dest, and move the note there.  */
4116 
4117 	  if (! orig_dest)
4118 	    abort ();
4119 
4120 	  for (insn = last; ; insn = PREV_INSN (insn))
4121 	    {
4122 	      if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4123 		  && (temp = single_set (insn))
4124 		  && rtx_equal_p (SET_DEST (temp), orig_dest))
4125 		{
4126 		  XEXP (note, 1) = REG_NOTES (insn);
4127 		  REG_NOTES (insn) = note;
4128 		  /* Only put this note on one of the new insns.  */
4129 		  break;
4130 		}
4131 
4132 	      /* The original dest must still be set someplace.  Abort if we
4133 		 couldn't find it.  */
4134 	      if (insn == first)
4135 		abort ();
4136 	    }
4137 	  break;
4138 
4139 	case REG_LIBCALL:
4140 	  /* Move a REG_LIBCALL note to the first insn created, and update
4141 	     the corresponding REG_RETVAL note.  */
4142 	  XEXP (note, 1) = REG_NOTES (first);
4143 	  REG_NOTES (first) = note;
4144 
4145 	  insn = XEXP (note, 0);
4146 	  note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
4147 	  if (note)
4148 	    XEXP (note, 0) = first;
4149 	  break;
4150 
4151 	case REG_RETVAL:
4152 	  /* Move a REG_RETVAL note to the last insn created, and update
4153 	     the corresponding REG_LIBCALL note.  */
4154 	  XEXP (note, 1) = REG_NOTES (last);
4155 	  REG_NOTES (last) = note;
4156 
4157 	  insn = XEXP (note, 0);
4158 	  note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
4159 	  if (note)
4160 	    XEXP (note, 0) = last;
4161 	  break;
4162 
4163 	case REG_NONNEG:
4164 	  /* This should be moved to whichever instruction is a JUMP_INSN.  */
4165 
4166 	  for (insn = last; ; insn = PREV_INSN (insn))
4167 	    {
4168 	      if (GET_CODE (insn) == JUMP_INSN)
4169 		{
4170 		  XEXP (note, 1) = REG_NOTES (insn);
4171 		  REG_NOTES (insn) = note;
4172 		  /* Only put this note on one of the new insns.  */
4173 		  break;
4174 		}
4175 	      /* Fail if we couldn't find a JUMP_INSN.  */
4176 	      if (insn == first)
4177 		abort ();
4178 	    }
4179 	  break;
4180 
4181 	case REG_INC:
4182 	  /* This should be moved to whichever instruction now has the
4183 	     increment operation.  */
4184 	  abort ();
4185 
4186 	case REG_LABEL:
4187 	  /* Should be moved to the new insn(s) which use the label.  */
4188 	  for (insn = first; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
4189 	    if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4190 		&& reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
4191 	      REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_LABEL,
4192 					  XEXP (note, 0), REG_NOTES (insn));
4193 	  break;
4194 
4195 	case REG_CC_SETTER:
4196 	case REG_CC_USER:
4197 	  /* These two notes will never appear until after reorg, so we don't
4198 	     have to handle them here.  */
4199 	default:
4200 	  abort ();
4201 	}
4202     }
4203 
4204   /* Each new insn created, except the last, has a new set.  If the destination
4205      is a register, then this reg is now live across several insns, whereas
4206      previously the dest reg was born and died within the same insn.  To
4207      reflect this, we now need a REG_DEAD note on the insn where this
4208      dest reg dies.
4209 
4210      Similarly, the new insns may have clobbers that need REG_UNUSED notes.  */
4211 
4212   for (insn = first; insn != last; insn = NEXT_INSN (insn))
4213     {
4214       rtx pat;
4215       int i;
4216 
4217       pat = PATTERN (insn);
4218       if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
4219 	new_insn_dead_notes (pat, insn, last, orig_insn);
4220       else if (GET_CODE (pat) == PARALLEL)
4221 	{
4222 	  for (i = 0; i < XVECLEN (pat, 0); i++)
4223 	    if (GET_CODE (XVECEXP (pat, 0, i)) == SET
4224 		|| GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER)
4225 	      new_insn_dead_notes (XVECEXP (pat, 0, i), insn, last, orig_insn);
4226 	}
4227     }
4228 
4229   /* If any insn, except the last, uses the register set by the last insn,
4230      then we need a new REG_DEAD note on that insn.  In this case, there
4231      would not have been a REG_DEAD note for this register in the original
4232      insn because it was used and set within one insn.
4233 
4234      There is no new REG_DEAD note needed if the last insn uses the register
4235      that it is setting.  */
4236 
4237   set = single_set (last);
4238   if (set)
4239     {
4240       rtx dest = SET_DEST (set);
4241 
4242       while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
4243 	     || GET_CODE (dest) == STRICT_LOW_PART
4244 	     || GET_CODE (dest) == SIGN_EXTRACT)
4245 	dest = XEXP (dest, 0);
4246 
4247       if (GET_CODE (dest) == REG
4248 	  && ! reg_overlap_mentioned_p (dest, SET_SRC (set)))
4249 	{
4250 	  for (insn = PREV_INSN (last); ; insn = PREV_INSN (insn))
4251 	    {
4252 	      if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4253 		  && reg_mentioned_p (dest, PATTERN (insn))
4254 		  && (set = single_set (insn)))
4255 		{
4256 		  rtx insn_dest = SET_DEST (set);
4257 
4258 		  while (GET_CODE (insn_dest) == ZERO_EXTRACT
4259 			 || GET_CODE (insn_dest) == SUBREG
4260 			 || GET_CODE (insn_dest) == STRICT_LOW_PART
4261 			 || GET_CODE (insn_dest) == SIGN_EXTRACT)
4262 		    insn_dest = XEXP (insn_dest, 0);
4263 
4264 		  if (insn_dest != dest)
4265 		    {
4266 		      note = rtx_alloc (EXPR_LIST);
4267 		      PUT_REG_NOTE_KIND (note, REG_DEAD);
4268 		      XEXP (note, 0) = dest;
4269 		      XEXP (note, 1) = REG_NOTES (insn);
4270 		      REG_NOTES (insn) = note;
4271 		      /* The reg only dies in one insn, the last one
4272 			 that uses it.  */
4273 		      break;
4274 		    }
4275 		}
4276 	      if (insn == first)
4277 		break;
4278 	    }
4279 	}
4280     }
4281 
4282   /* If the original dest is modifying a multiple register target, and the
4283      original instruction was split such that the original dest is now set
4284      by two or more SUBREG sets, then the split insns no longer kill the
4285      destination of the original insn.
4286 
4287      In this case, if there exists an instruction in the same basic block,
4288      before the split insn, which uses the original dest, and this use is
4289      killed by the original insn, then we must remove the REG_DEAD note on
4290      this insn, because it is now superfluous.
4291 
4292      This does not apply when a hard register gets split, because the code
4293      knows how to handle overlapping hard registers properly.  */
4294   if (orig_dest && GET_CODE (orig_dest) == REG)
4295     {
4296       int found_orig_dest = 0;
4297       int found_split_dest = 0;
4298 
4299       for (insn = first; ; insn = NEXT_INSN (insn))
4300 	{
4301 	  set = single_set (insn);
4302 	  if (set)
4303 	    {
4304 	      if (GET_CODE (SET_DEST (set)) == REG
4305 		  && REGNO (SET_DEST (set)) == REGNO (orig_dest))
4306 		{
4307 		  found_orig_dest = 1;
4308 		  break;
4309 		}
4310 	      else if (GET_CODE (SET_DEST (set)) == SUBREG
4311 		       && SUBREG_REG (SET_DEST (set)) == orig_dest)
4312 		{
4313 		  found_split_dest = 1;
4314 		  break;
4315 		}
4316 	    }
4317 
4318 	  if (insn == last)
4319 	    break;
4320 	}
4321 
4322       if (found_split_dest)
4323 	{
4324 	  /* Search backwards from FIRST, looking for the first insn that uses
4325 	     the original dest.  Stop if we pass a CODE_LABEL or a JUMP_INSN.
4326 	     If we find an insn, and it has a REG_DEAD note, then delete the
4327 	     note.  */
4328 
4329 	  for (insn = first; insn; insn = PREV_INSN (insn))
4330 	    {
4331 	      if (GET_CODE (insn) == CODE_LABEL
4332 		  || GET_CODE (insn) == JUMP_INSN)
4333 		break;
4334 	      else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4335 		       && reg_mentioned_p (orig_dest, insn))
4336 		{
4337 		  note = find_regno_note (insn, REG_DEAD, REGNO (orig_dest));
4338 		  if (note)
4339 		    remove_note (insn, note);
4340 		}
4341 	    }
4342 	}
4343       else if (! found_orig_dest)
4344 	{
4345 	  /* This should never happen.  */
4346 	  abort ();
4347 	}
4348     }
4349 
4350   /* Update reg_n_sets.  This is necessary to prevent local alloc from
4351      converting REG_EQUAL notes to REG_EQUIV when splitting has modified
4352      a reg from set once to set multiple times.  */
4353 
4354   {
4355     rtx x = PATTERN (orig_insn);
4356     RTX_CODE code = GET_CODE (x);
4357 
4358     if (code == SET || code == CLOBBER)
4359       update_n_sets (x, -1);
4360     else if (code == PARALLEL)
4361       {
4362 	int i;
4363 	for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4364 	  {
4365 	    code = GET_CODE (XVECEXP (x, 0, i));
4366 	    if (code == SET || code == CLOBBER)
4367 	      update_n_sets (XVECEXP (x, 0, i), -1);
4368 	  }
4369       }
4370 
4371     for (insn = first; ; insn = NEXT_INSN (insn))
4372       {
4373 	x = PATTERN (insn);
4374 	code = GET_CODE (x);
4375 
4376 	if (code == SET || code == CLOBBER)
4377 	  update_n_sets (x, 1);
4378 	else if (code == PARALLEL)
4379 	  {
4380 	    int i;
4381 	    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4382 	      {
4383 		code = GET_CODE (XVECEXP (x, 0, i));
4384 		if (code == SET || code == CLOBBER)
4385 		  update_n_sets (XVECEXP (x, 0, i), 1);
4386 	      }
4387 	  }
4388 
4389 	if (insn == last)
4390 	  break;
4391       }
4392   }
4393 }
4394 
4395 /* The one entry point in this file.  DUMP_FILE is the dump file for
4396    this pass.  */
4397 
4398 void
schedule_insns(dump_file)4399 schedule_insns (dump_file)
4400      FILE *dump_file;
4401 {
4402   int max_uid = MAX_INSNS_PER_SPLIT * (get_max_uid () + 1);
4403   int i, b;
4404   rtx insn;
4405 
4406   /* Taking care of this degenerate case makes the rest of
4407      this code simpler.  */
4408   if (n_basic_blocks == 0)
4409     return;
4410 
4411   /* Create an insn here so that we can hang dependencies off of it later.  */
4412   sched_before_next_call
4413     = gen_rtx (INSN, VOIDmode, 0, NULL_RTX, NULL_RTX,
4414 	       NULL_RTX, 0, NULL_RTX, 0);
4415 
4416   /* Initialize the unused_*_lists.  We can't use the ones left over from
4417      the previous function, because gcc has freed that memory.  We can use
4418      the ones left over from the first sched pass in the second pass however,
4419      so only clear them on the first sched pass.  The first pass is before
4420      reload if flag_schedule_insns is set, otherwise it is afterwards.  */
4421 
4422   if (reload_completed == 0 || ! flag_schedule_insns)
4423     {
4424       unused_insn_list = 0;
4425       unused_expr_list = 0;
4426     }
4427 
4428   /* We create no insns here, only reorder them, so we
4429      remember how far we can cut back the stack on exit.  */
4430 
4431   /* Allocate data for this pass.  See comments, above,
4432      for what these vectors do.  */
4433   insn_luid = (int *) alloca (max_uid * sizeof (int));
4434   insn_priority = (int *) alloca (max_uid * sizeof (int));
4435   insn_tick = (int *) alloca (max_uid * sizeof (int));
4436   insn_costs = (short *) alloca (max_uid * sizeof (short));
4437   insn_units = (short *) alloca (max_uid * sizeof (short));
4438   insn_blockage = (unsigned int *) alloca (max_uid * sizeof (unsigned int));
4439   insn_ref_count = (int *) alloca (max_uid * sizeof (int));
4440 
4441   if (reload_completed == 0)
4442     {
4443       sched_reg_n_deaths = (short *) alloca (max_regno * sizeof (short));
4444       sched_reg_n_calls_crossed = (int *) alloca (max_regno * sizeof (int));
4445       sched_reg_live_length = (int *) alloca (max_regno * sizeof (int));
4446       bb_dead_regs = (regset) alloca (regset_bytes);
4447       bb_live_regs = (regset) alloca (regset_bytes);
4448       bzero (sched_reg_n_calls_crossed, max_regno * sizeof (int));
4449       bzero (sched_reg_live_length, max_regno * sizeof (int));
4450       bcopy (reg_n_deaths, sched_reg_n_deaths, max_regno * sizeof (short));
4451       init_alias_analysis ();
4452     }
4453   else
4454     {
4455       sched_reg_n_deaths = 0;
4456       sched_reg_n_calls_crossed = 0;
4457       sched_reg_live_length = 0;
4458       bb_dead_regs = 0;
4459       bb_live_regs = 0;
4460       if (! flag_schedule_insns)
4461 	init_alias_analysis ();
4462     }
4463 
4464   if (write_symbols != NO_DEBUG)
4465     {
4466       rtx line;
4467 
4468       line_note = (rtx *) alloca (max_uid * sizeof (rtx));
4469       bzero (line_note, max_uid * sizeof (rtx));
4470       line_note_head = (rtx *) alloca (n_basic_blocks * sizeof (rtx));
4471       bzero (line_note_head, n_basic_blocks * sizeof (rtx));
4472 
4473       /* Determine the line-number at the start of each basic block.
4474 	 This must be computed and saved now, because after a basic block's
4475 	 predecessor has been scheduled, it is impossible to accurately
4476 	 determine the correct line number for the first insn of the block.  */
4477 
4478       for (b = 0; b < n_basic_blocks; b++)
4479 	for (line = basic_block_head[b]; line; line = PREV_INSN (line))
4480 	  if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
4481 	    {
4482 	      line_note_head[b] = line;
4483 	      break;
4484 	    }
4485     }
4486 
4487   bzero (insn_luid, max_uid * sizeof (int));
4488   bzero (insn_priority, max_uid * sizeof (int));
4489   bzero (insn_tick, max_uid * sizeof (int));
4490   bzero (insn_costs, max_uid * sizeof (short));
4491   bzero (insn_units, max_uid * sizeof (short));
4492   bzero (insn_blockage, max_uid * sizeof (unsigned int));
4493   bzero (insn_ref_count, max_uid * sizeof (int));
4494 
4495   /* Schedule each basic block, block by block.  */
4496 
4497   if (NEXT_INSN (basic_block_end[n_basic_blocks-1]) == 0
4498       || (GET_CODE (basic_block_end[n_basic_blocks-1]) != NOTE
4499 	  && GET_CODE (basic_block_end[n_basic_blocks-1]) != CODE_LABEL))
4500     emit_note_after (NOTE_INSN_DELETED, basic_block_end[n_basic_blocks-1]);
4501 
4502   for (b = 0; b < n_basic_blocks; b++)
4503     {
4504       rtx insn, next;
4505       rtx insns;
4506 
4507       note_list = 0;
4508 
4509       for (insn = basic_block_head[b]; ; insn = next)
4510 	{
4511 	  rtx prev;
4512 	  rtx set;
4513 
4514 	  /* Can't use `next_real_insn' because that
4515 	     might go across CODE_LABELS and short-out basic blocks.  */
4516 	  next = NEXT_INSN (insn);
4517 	  if (GET_CODE (insn) != INSN)
4518 	    {
4519 	      if (insn == basic_block_end[b])
4520 		break;
4521 
4522 	      continue;
4523 	    }
4524 
4525 	  /* Don't split no-op move insns.  These should silently disappear
4526 	     later in final.  Splitting such insns would break the code
4527 	     that handles REG_NO_CONFLICT blocks.  */
4528 	  set = single_set (insn);
4529 	  if (set && rtx_equal_p (SET_SRC (set), SET_DEST (set)))
4530 	    {
4531 	      if (insn == basic_block_end[b])
4532 		break;
4533 
4534 	      /* Nops get in the way while scheduling, so delete them now if
4535 		 register allocation has already been done.  It is too risky
4536 		 to try to do this before register allocation, and there are
4537 		 unlikely to be very many nops then anyways.  */
4538 	      if (reload_completed)
4539 		{
4540 		  PUT_CODE (insn, NOTE);
4541 		  NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4542 		  NOTE_SOURCE_FILE (insn) = 0;
4543 		}
4544 
4545 	      continue;
4546 	    }
4547 
4548 	  /* Split insns here to get max fine-grain parallelism.  */
4549 	  prev = PREV_INSN (insn);
4550 	  if (reload_completed == 0)
4551 	    {
4552 	      rtx last, first = PREV_INSN (insn);
4553 	      rtx notes = REG_NOTES (insn);
4554 
4555 	      last = try_split (PATTERN (insn), insn, 1);
4556 	      if (last != insn)
4557 		{
4558 		  /* try_split returns the NOTE that INSN became.  */
4559 		  first = NEXT_INSN (first);
4560 		  update_flow_info (notes, first, last, insn);
4561 
4562 		  PUT_CODE (insn, NOTE);
4563 		  NOTE_SOURCE_FILE (insn) = 0;
4564 		  NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4565 		  if (insn == basic_block_head[b])
4566 		    basic_block_head[b] = first;
4567 		  if (insn == basic_block_end[b])
4568 		    {
4569 		      basic_block_end[b] = last;
4570 		      break;
4571 		    }
4572 		}
4573 	    }
4574 
4575 	  if (insn == basic_block_end[b])
4576 	    break;
4577 	}
4578 
4579       schedule_block (b, dump_file);
4580 
4581 #ifdef USE_C_ALLOCA
4582       alloca (0);
4583 #endif
4584     }
4585 
4586   /* Reposition the prologue and epilogue notes in case we moved the
4587      prologue/epilogue insns.  */
4588   if (reload_completed)
4589     reposition_prologue_and_epilogue_notes (get_insns ());
4590 
4591   if (write_symbols != NO_DEBUG)
4592     {
4593       rtx line = 0;
4594       rtx insn = get_insns ();
4595       int active_insn = 0;
4596       int notes = 0;
4597 
4598       /* Walk the insns deleting redundant line-number notes.  Many of these
4599 	 are already present.  The remainder tend to occur at basic
4600 	 block boundaries.  */
4601       for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
4602 	if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
4603 	  {
4604 	    /* If there are no active insns following, INSN is redundant.  */
4605 	    if (active_insn == 0)
4606 	      {
4607 		notes++;
4608 		NOTE_SOURCE_FILE (insn) = 0;
4609 		NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4610 	      }
4611 	    /* If the line number is unchanged, LINE is redundant.  */
4612 	    else if (line
4613 		     && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
4614 		     && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn))
4615 	      {
4616 		notes++;
4617 		NOTE_SOURCE_FILE (line) = 0;
4618 		NOTE_LINE_NUMBER (line) = NOTE_INSN_DELETED;
4619 		line = insn;
4620 	      }
4621 	    else
4622 	      line = insn;
4623 	    active_insn = 0;
4624 	  }
4625 	else if (! ((GET_CODE (insn) == NOTE
4626 		     && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
4627 		    || (GET_CODE (insn) == INSN
4628 			&& (GET_CODE (PATTERN (insn)) == USE
4629 			    || GET_CODE (PATTERN (insn)) == CLOBBER))))
4630 	  active_insn++;
4631 
4632       if (dump_file && notes)
4633 	fprintf (dump_file, ";; deleted %d line-number notes\n", notes);
4634     }
4635 
4636   if (reload_completed == 0)
4637     {
4638       int regno;
4639       for (regno = 0; regno < max_regno; regno++)
4640 	if (sched_reg_live_length[regno])
4641 	  {
4642 	    if (dump_file)
4643 	      {
4644 		if (reg_live_length[regno] > sched_reg_live_length[regno])
4645 		  fprintf (dump_file,
4646 			   ";; register %d life shortened from %d to %d\n",
4647 			   regno, reg_live_length[regno],
4648 			   sched_reg_live_length[regno]);
4649 		/* Negative values are special; don't overwrite the current
4650 		   reg_live_length value if it is negative.  */
4651 		else if (reg_live_length[regno] < sched_reg_live_length[regno]
4652 			 && reg_live_length[regno] >= 0)
4653 		  fprintf (dump_file,
4654 			   ";; register %d life extended from %d to %d\n",
4655 			   regno, reg_live_length[regno],
4656 			   sched_reg_live_length[regno]);
4657 
4658 		if (reg_n_calls_crossed[regno]
4659 		    && ! sched_reg_n_calls_crossed[regno])
4660 		  fprintf (dump_file,
4661 			   ";; register %d no longer crosses calls\n", regno);
4662 		else if (! reg_n_calls_crossed[regno]
4663 			 && sched_reg_n_calls_crossed[regno])
4664 		  fprintf (dump_file,
4665 			   ";; register %d now crosses calls\n", regno);
4666 	      }
4667 	    /* Negative values are special; don't overwrite the current
4668 	       reg_live_length value if it is negative.  */
4669 	    if (reg_live_length[regno] >= 0)
4670 	      reg_live_length[regno] = sched_reg_live_length[regno];
4671 	    reg_n_calls_crossed[regno] = sched_reg_n_calls_crossed[regno];
4672 	  }
4673     }
4674 }
4675 #endif /* INSN_SCHEDULING */
4676