/* Instruction scheduling pass.  Selective scheduler and pipeliner.
   Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl-error.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "params.h"
#include "target.h"
#include "output.h"
#include "timevar.h"
#include "tree-pass.h"
#include "sched-int.h"
#include "ggc.h"
#include "tree.h"
#include "vec.h"
#include "langhooks.h"
#include "rtlhooks-def.h"
#include "output.h"
#include "emit-rtl.h"
#include "ira.h"

#ifdef INSN_SCHEDULING
#include "sel-sched-ir.h"
#include "sel-sched-dump.h"
#include "sel-sched.h"
#include "dbgcnt.h"

/* Implementation of selective scheduling approach.
   The below implementation follows the original approach with the following
   changes:

   o the scheduler works after register allocation (but can also be tuned
   to work before RA);
   o some instructions are not copied or register renamed;
   o conditional jumps are not moved with code duplication;
   o several jumps in one parallel group are not supported;
   o when pipelining outer loops, code motion through inner loops
   is not supported;
   o control and data speculation are supported;
   o some improvements for better compile time/performance were made.

   Terminology
   ===========

   A vinsn, or virtual insn, is an insn with additional data characterizing
   its insn pattern, such as LHS, RHS, register sets used/set/clobbered, etc.
   Vinsns also act as smart pointers to save memory by reusing them in
   different expressions.  A vinsn is described by the vinsn_t type.

   An expression is a vinsn with additional data characterizing its properties
   at some point in the control flow graph.  The data may be its usefulness,
   priority, speculative status, whether it was renamed/substituted, etc.
   An expression is described by the expr_t type.

   An availability set (av_set) is a set of expressions at a given control
   flow point.  It is represented as av_set_t.  The expressions in av sets
   are kept sorted in terms of the expr_greater_p function.  This allows
   truncating the set while leaving the best expressions.

   A fence is a point through which code motion is prohibited.  On each step,
   we gather a parallel group of insns at a fence.  It is possible to have
   multiple fences.  A fence is represented via fence_t.

   A boundary is the border between the fence group and the rest of the code.
   Currently, we never have more than one boundary per fence, as we finalize
   the fence group when a jump is scheduled.  A boundary is represented
   via bnd_t.

   High-level overview
   ===================

   The scheduler finds regions to schedule, schedules each one, and finalizes.
   The regions are formed starting from innermost loops, so that when the inner
   loop is pipelined, its prologue can be scheduled together with the yet
   unprocessed outer loop.  The remaining acyclic regions are found using
   extend_rgns: the blocks that are not yet allocated to any regions are
   traversed in top-down order, and a block is added to a region to which all
   its predecessors belong; otherwise, the block starts its own region.

   The main scheduling loop (sel_sched_region_2) consists of just
   scheduling on each fence and updating fences.  For each fence,
   we fill a parallel group of insns (fill_insns) until no more insns can
   be added.  First, we compute available exprs (av-set) at the boundary of
   the current group.  Second, we choose the best expression from it.  If a
   stall is required to schedule any of the expressions, we advance the
   current cycle appropriately.  So, the final group does not exactly
   correspond to a VLIW word.  Third, we move the chosen expression to the
   boundary (move_op) and update the intermediate av sets and liveness sets.
   We quit fill_insns either when no insns are left for scheduling or when
   we have scheduled enough insns to justify advancing the scheduling point.

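   In outline, one fill_insns iteration looks roughly like this (a
   simplified sketch, not the actual control flow, which also handles
   liveness updates, boundary bookkeeping, and the decision to quit):

     av = compute_av_set (bnd);          <- union of successors' sets
     expr = find_best_expr (av, fence);  <- rank, filter, pick a register
     if (a stall is needed for expr)
       advance_one_cycle (fence);
     move_op (expr, ...);                <- move expr up to the boundary
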
   Computing available expressions
   ===============================

   The computation (compute_av_set) is a bottom-up traversal.  At each insn,
   we're moving the union of its successors' sets through it via
   moveup_expr_set.  The dependent expressions are removed.  Local
   transformations (substitution, speculation) are applied to move more
   exprs.  Then the expr corresponding to the current insn is added.
   The result is saved on each basic block header.

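   A sketch of the recursion (simplified; the real code additionally
   caches results on bb headers and obeys the window and successor
   restrictions described below):

     compute_av_set (insn):
       av = union of compute_av_set (succ) over eligible successors
       moveup_expr_set (av, insn)   <- drop dependent exprs, substitute,
                                       speculate
       add the expr of insn itself to av
       return av
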
   When traversing the CFG, we're moving down for no more than max_ws insns.
   Also, we do not move down to ineligible successors (is_ineligible_successor),
   which include moving along a back-edge, moving to already scheduled code,
   and moving to another fence.  The first two restrictions are lifted during
   pipelining, which allows us to move insns along a back-edge.  We always have
   an acyclic region for scheduling because we forbid motion through fences.

   Choosing the best expression
   ============================

   We sort the final availability set via sel_rank_for_schedule, then we remove
   expressions which are not yet ready (tick_check_p) or whose destination
   registers cannot be used.  For some of them, we choose another register via
   find_best_reg.  To do this, we run find_used_regs to calculate the set of
   registers which cannot be used.  The find_used_regs function performs
   a traversal of code motion paths for an expr.  For renaming, we consider
   only registers from the same register class as the original one whose use
   does not interfere with any live ranges.  Finally, we convert
   the resulting set to the ready list format and use max_issue and reorder*
   hooks similarly to the Haifa scheduler.

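   Schematically (a simplified sketch of the pipeline implemented by
   find_best_expr and its helpers):

     sort the av set with sel_rank_for_schedule
     drop exprs that fail tick_check_p or whose dest regs are unusable
       (find_best_reg, backed by find_used_regs, may rename instead)
     convert the survivors to the ready list
     pick an insn via max_issue and the reorder* target hooks
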
   Scheduling the best expression
   ==============================

   We run the move_op routine to perform the same type of code motion paths
   traversal as in find_used_regs.  (These work via the same driver,
   code_motion_path_driver.)  When moving down the CFG, we look for the
   original instruction that gave birth to the chosen expression.  We undo
   the transformations performed on an expression via the history saved in it.
   When found, we remove the instruction or leave a reg-reg copy/speculation
   check if needed.  On the way up, we insert bookkeeping copies at each join
   point.  If a copy is not needed, it will be removed later during this
   traversal.  We update the saved av sets and liveness sets on the way up, too.

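   For example, if an expr r1 := r2 + r3 computed in a join block is
   scheduled above the join along only one of the incoming paths, a
   bookkeeping copy of r1 := r2 + r3 is inserted on the other incoming
   edge, so every path through the join still computes r1.  (This is an
   illustration only; the actual placement is chosen by
   find_block_for_bookkeeping.)
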
   Finalizing the schedule
   =======================

   When pipelining, we reschedule the blocks from which insns were pipelined
   to get a tighter schedule.  On Itanium, we also perform bundling via
   the same routine from ia64.c.

   Dependence analysis changes
   ===========================

   We augmented sched-deps.c with hooks that get called when a particular
   dependence is found in a particular part of an insn.  Using these hooks, we
   can do several actions such as: determine whether an insn can be moved through
   another (has_dependence_p, moveup_expr); find out whether an insn can be
   scheduled on the current cycle (tick_check_p); find out registers that
   are set/used/clobbered by an insn and find out all the strange stuff that
   restricts its movement, like SCHED_GROUP_P or CANT_MOVE (done in
   init_global_and_expr_for_insn).

   Initialization changes
   ======================

   There are parts of haifa-sched.c, sched-deps.c, and sched-rgn.c that are
   reused in all of the schedulers.  We have split up the initialization of data
   of such parts into different functions prefixed with scheduler type and
   postfixed with the type of data initialized: {,sel_,haifa_}sched_{init,finish},
   sched_rgn_init/finish, sched_deps_init/finish, sched_init_{luids/bbs}, etc.
   The same splitting is done with the current_sched_info structure:
   dependence-related parts are in sched_deps_info, the common part is in
   common_sched_info, and the haifa/sel/etc part is in current_sched_info.

   Target contexts
   ===============

   As we now have multiple-point scheduling, the old scheme does not work for
   backends that save some of the scheduler state to use it in the target hooks.
   For this purpose, we introduce the concept of target contexts, which
   encapsulate such information.  The backend should implement simple routines
   for allocating/freeing/setting such a context.  The scheduler calls these
   as target hooks and handles the target context as an opaque pointer (similar
   to the DFA state type, state_t).

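   Schematically, the scheduler drives such a context like this (hook names
   here follow the TARGET_SCHED_*_SCHED_CONTEXT hooks; see doc/tm.texi for
   the authoritative signatures):

     tc = targetm.sched.alloc_sched_context ();
     targetm.sched.init_sched_context (tc, false);
     ...
     targetm.sched.set_sched_context (tc);  <- switch to a fence's context
     ...
     targetm.sched.free_sched_context (tc);
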
   Various speedups
   ================

   As the correct data dependence graph is not supported during scheduling (which
   is to be changed in mid-term), we cache as much of the dependence analysis
   results as possible to avoid reanalyzing.  This includes: bitmap caches on
   each insn in the region's stream saying yes/no for a query with a pair of
   UIDs; hashtables with the previously done transformations on each insn in
   the stream; a vector keeping a history of transformations on each expr.

   Also, we try to minimize the dependence context used on each fence to check
   whether the given expression is ready for scheduling by removing from it
   insns that have definitely completed execution.  The results of
   tick_check_p checks are also cached in a vector on each fence.

   We keep a valid liveness set on each insn in a region to avoid the high
   cost of recomputation on large basic blocks.

   Finally, we try to minimize the number of needed updates to the availability
   sets.  The updates happen in two cases: when fill_insns terminates,
   we advance all fences and increase the stage number to show that the region
   has changed and the sets are to be recomputed; and when the next iteration
   of a loop in fill_insns happens (but this one reuses the saved av sets
   on bb headers).  Thus, we try to break the fill_insns loop only when
   a "significant" number of insns from the current scheduling window has been
   scheduled.  This should be made a target param.


   TODO: correctly support the data dependence graph at all stages and get rid
   of all caches.  This should speed up the scheduler.
   TODO: implement moving cond jumps with bookkeeping copies on both targets.
   TODO: tune the scheduler before RA so it does not create too many pseudos.


   References:
   S.-M. Moon and K. Ebcioglu.  Parallelizing nonnumerical code with
   selective scheduling and software pipelining.
   ACM TOPLAS, Vol 19, No. 6, pages 853--898, Nov. 1997.

   Andrey Belevantsev, Maxim Kuvyrkov, Vladimir Makarov, Dmitry Melnik,
   and Dmitry Zhurikhin.  An interblock VLIW-targeted instruction scheduler
   for GCC.  In Proceedings of GCC Developers' Summit 2006.

   Arutyun Avetisyan, Andrey Belevantsev, and Dmitry Melnik.  GCC Instruction
   Scheduler and Software Pipeliner on the Itanium Platform.  EPIC-7 Workshop.
   http://rogue.colorado.edu/EPIC7/.

*/

/* True when pipelining is enabled.  */
bool pipelining_p;

/* True if bookkeeping is enabled.  */
bool bookkeeping_p;

/* Maximum number of insns that are eligible for renaming.  */
int max_insns_to_rename;


/* Definitions of local types and macros.  */

/* Represents possible outcomes of moving an expression through an insn.  */
enum MOVEUP_EXPR_CODE
  {
    /* The expression is not changed.  */
    MOVEUP_EXPR_SAME,

    /* Not changed, but requires a new destination register.  */
    MOVEUP_EXPR_AS_RHS,

    /* Cannot be moved.  */
    MOVEUP_EXPR_NULL,

    /* Changed (substituted or speculated).  */
    MOVEUP_EXPR_CHANGED
  };

/* The container to be passed into rtx search & replace functions.  */
struct rtx_search_arg
{
  /* What we are searching for.  */
  rtx x;

  /* The occurrence counter.  */
  int n;
};

typedef struct rtx_search_arg *rtx_search_arg_p;

/* This struct contains precomputed hard reg sets that are needed when
   computing registers available for renaming.  */
struct hard_regs_data
{
  /* For every mode, this stores registers available for use with
     that mode.  */
  HARD_REG_SET regs_for_mode[NUM_MACHINE_MODES];

  /* True when regs_for_mode[mode] is initialized.  */
  bool regs_for_mode_ok[NUM_MACHINE_MODES];

  /* For every register, it has regs that are ok to rename into it.
     The register in question is always set.  If not, this means
     that the whole set is not computed yet.  */
  HARD_REG_SET regs_for_rename[FIRST_PSEUDO_REGISTER];

  /* For every mode, this stores registers not available due to
     call clobbering.  */
  HARD_REG_SET regs_for_call_clobbered[NUM_MACHINE_MODES];

  /* All registers that are used or call used.  */
  HARD_REG_SET regs_ever_used;

#ifdef STACK_REGS
  /* Stack registers.  */
  HARD_REG_SET stack_regs;
#endif
};

/* Holds the results of computing which hard registers are available
   for renaming and which are unavailable.  */
struct reg_rename
{
  /* These are unavailable due to calls crossing, globalness, etc.  */
  HARD_REG_SET unavailable_hard_regs;

  /* These are *available* for renaming.  */
  HARD_REG_SET available_for_renaming;

  /* Whether this code motion path crosses a call.  */
  bool crosses_call;
};

/* A global structure that contains the needed information about hard
   regs.  */
static struct hard_regs_data sel_hrd;


/* This structure holds local data used in code_motion_path_driver hooks on
   the same or adjacent levels of recursion.  Here we keep those parameters
   that are not used in the code_motion_path_driver routine itself, but only in
   its hooks.  Moreover, all parameters that can be modified in hooks are
   in this structure, so all other parameters passed explicitly to hooks are
   read-only.  */
struct cmpd_local_params
{
  /* Local params used in move_op_* functions.  */

  /* Edges for bookkeeping generation.  */
  edge e1, e2;

  /* C_EXPR merged from all successors and locally allocated temporary C_EXPR.  */
  expr_t c_expr_merged, c_expr_local;

  /* Local params used in fur_* functions.  */
  /* Copy of the ORIGINAL_INSN list, stores the original insns already
     found before entering the current level of code_motion_path_driver.  */
  def_list_t old_original_insns;

  /* Local params used in move_op_* functions.  */
  /* True when we have removed the last insn in the block which was
     also a boundary.  Do not update anything or create bookkeeping copies.  */
  BOOL_BITFIELD removed_last_insn : 1;
};

/* Stores the static parameters for move_op_* calls.  */
struct moveop_static_params
{
  /* Destination register.  */
  rtx dest;

  /* Current C_EXPR.  */
  expr_t c_expr;

  /* The UID of expr_vliw which is to be moved up.  If we find other exprs,
     they are to be removed.  */
  int uid;

#ifdef ENABLE_CHECKING
  /* This is initialized to the insn on which the driver stopped its traversal.  */
  insn_t failed_insn;
#endif

  /* True if we scheduled an insn with a different register.  */
  bool was_renamed;
};

/* Stores the static parameters for fur_* calls.  */
struct fur_static_params
{
  /* Set of registers unavailable on the code motion path.  */
  regset used_regs;

  /* Pointer to the list of original insns definitions.  */
  def_list_t *original_insns;

  /* True if a code motion path contains a CALL insn.  */
  bool crosses_call;
};

typedef struct fur_static_params *fur_static_params_p;
typedef struct cmpd_local_params *cmpd_local_params_p;
typedef struct moveop_static_params *moveop_static_params_p;

/* Set of hooks and parameters that determine behaviour specific to
   move_op or find_used_regs functions.  */
struct code_motion_path_driver_info_def
{
  /* Called on enter to the basic block.  */
  int (*on_enter) (insn_t, cmpd_local_params_p, void *, bool);

  /* Called when original expr is found.  */
  void (*orig_expr_found) (insn_t, expr_t, cmpd_local_params_p, void *);

  /* Called while descending current basic block if current insn is not
     the original EXPR we're searching for.  */
  bool (*orig_expr_not_found) (insn_t, av_set_t, void *);

  /* Function to merge C_EXPRes from different successors.  */
  void (*merge_succs) (insn_t, insn_t, int, cmpd_local_params_p, void *);

  /* Function to finalize merge from different successors and possibly
     deallocate temporary data structures used for merging.  */
  void (*after_merge_succs) (cmpd_local_params_p, void *);

  /* Called on the backward stage of recursion to do moveup_expr.
     Used only with move_op_*.  */
  void (*ascend) (insn_t, void *);

  /* Called on the ascending pass, before returning from the current basic
     block or from the whole traversal.  */
  void (*at_first_insn) (insn_t, cmpd_local_params_p, void *);

  /* When processing successors in move_op we need only descend into
     SUCCS_NORMAL successors, while in find_used_regs we need SUCCS_ALL.  */
  int succ_flags;

  /* The routine name to print in dumps ("move_op" or "find_used_regs").  */
  const char *routine_name;
};

/* Global pointer to current hooks, either points to MOVE_OP_HOOKS or
   FUR_HOOKS.  */
struct code_motion_path_driver_info_def *code_motion_path_driver_info;

/* Set of hooks for performing move_op and find_used_regs routines with
   code_motion_path_driver.  */
extern struct code_motion_path_driver_info_def move_op_hooks, fur_hooks;

/* True if/when we want to emulate the Haifa scheduler in the common code.
   This is used in sched_rgn_local_init and in various places in
   sched-deps.c.  */
int sched_emulate_haifa_p;

/* GLOBAL_LEVEL is used to discard information stored in basic block headers
   av_sets.  The av_set of a bb header is valid if its (the bb header's) level
   is equal to GLOBAL_LEVEL, and invalid if it is less.  This is primarily used
   to advance the scheduling window.  */
int global_level;

/* Current fences.  */
flist_t fences;

/* True when separable insns should be scheduled as RHSes.  */
static bool enable_schedule_as_rhs_p;

/* Used in verify_target_availability to assert that a target reg is reported
   unavailable by both TARGET_UNAVAILABLE and find_used_regs only if
   we haven't scheduled anything on the previous fence.
   If scheduled_something_on_previous_fence is true, TARGET_UNAVAILABLE can
   have a more conservative value than the one returned by
   find_used_regs, thus we shouldn't assert that these values are equal.  */
static bool scheduled_something_on_previous_fence;

/* All newly emitted insns will have their uids greater than this value.  */
static int first_emitted_uid;

/* Set of basic blocks that are forced to start new ebbs.  This is a subset
   of all the ebb heads.  */
static bitmap_head _forced_ebb_heads;
bitmap_head *forced_ebb_heads = &_forced_ebb_heads;

/* Blocks that need to be rescheduled after pipelining.  */
bitmap blocks_to_reschedule = NULL;

/* True when the first lv set should be ignored when updating liveness.  */
static bool ignore_first = false;

/* Number of insns max_issue has initialized data structures for.  */
static int max_issue_size = 0;

/* Whether we can issue more instructions.  */
static int can_issue_more;

/* Maximum software lookahead window size, reduced when rescheduling after
   pipelining.  */
static int max_ws;

/* Number of insns scheduled in current region.  */
static int num_insns_scheduled;

/* A vector of expressions is used to be able to sort them.  */
DEF_VEC_P(expr_t);
DEF_VEC_ALLOC_P(expr_t,heap);
static VEC(expr_t, heap) *vec_av_set = NULL;

/* A vector of vinsns is used to hold temporary lists of vinsns.  */
DEF_VEC_P(vinsn_t);
DEF_VEC_ALLOC_P(vinsn_t,heap);
typedef VEC(vinsn_t, heap) *vinsn_vec_t;

/* This vector has the exprs which may still be present in av_sets, but
   actually can't be moved up due to bookkeeping created during code motion
   to another fence.  See the comment near the call to
   update_and_record_unavailable_insns for a detailed explanation.  */
static vinsn_vec_t vec_bookkeeping_blocked_vinsns = NULL;

/* This vector has vinsns which are scheduled with renaming on the first fence
   and then seen on the second.  For expressions with such vinsns, target
   availability information may be wrong.  */
static vinsn_vec_t vec_target_unavailable_vinsns = NULL;

/* Vector to store temporary nops inserted in move_op to prevent removal
   of empty bbs.  */
DEF_VEC_P(insn_t);
DEF_VEC_ALLOC_P(insn_t,heap);
static VEC(insn_t, heap) *vec_temp_moveop_nops = NULL;

/* These bitmaps record original instructions scheduled on the current
   iteration and bookkeeping copies created by them.  */
static bitmap current_originators = NULL;
static bitmap current_copies = NULL;

/* This bitmap marks the blocks visited by code_motion_path_driver so we don't
   visit them afterwards.  */
static bitmap code_motion_visited_blocks = NULL;

/* Variables to accumulate different statistics.  */

/* The number of bookkeeping copies created.  */
static int stat_bookkeeping_copies;

/* The number of insns that required bookkeeping for their scheduling.  */
static int stat_insns_needed_bookkeeping;

/* The number of insns that got renamed.  */
static int stat_renamed_scheduled;

/* The number of substitutions made during scheduling.  */
static int stat_substitutions_total;


/* Forward declarations of static functions.  */
static bool rtx_ok_for_substitution_p (rtx, rtx);
static int sel_rank_for_schedule (const void *, const void *);
static av_set_t find_sequential_best_exprs (bnd_t, expr_t, bool);
static basic_block find_block_for_bookkeeping (edge e1, edge e2, bool lax);

static rtx get_dest_from_orig_ops (av_set_t);
static basic_block generate_bookkeeping_insn (expr_t, edge, edge);
static bool find_used_regs (insn_t, av_set_t, regset, struct reg_rename *,
                            def_list_t *);
static bool move_op (insn_t, av_set_t, expr_t, rtx, expr_t, bool*);
static int code_motion_path_driver (insn_t, av_set_t, ilist_t,
                                    cmpd_local_params_p, void *);
static void sel_sched_region_1 (void);
static void sel_sched_region_2 (int);
static av_set_t compute_av_set_inside_bb (insn_t, ilist_t, int, bool);

static void debug_state (state_t);


/* Functions that work with fences.  */

/* Advance one cycle on FENCE.  */
static void
advance_one_cycle (fence_t fence)
{
  unsigned i;
  int cycle;
  rtx insn;

  advance_state (FENCE_STATE (fence));
  cycle = ++FENCE_CYCLE (fence);
  FENCE_ISSUED_INSNS (fence) = 0;
  FENCE_STARTS_CYCLE_P (fence) = 1;
  can_issue_more = issue_rate;
  FENCE_ISSUE_MORE (fence) = can_issue_more;

  for (i = 0; VEC_iterate (rtx, FENCE_EXECUTING_INSNS (fence), i, insn); )
    {
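      /* The insn has completed execution by this cycle: drop it from the
         dependence context so it no longer constrains readiness checks.  */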
      if (INSN_READY_CYCLE (insn) < cycle)
        {
          remove_from_deps (FENCE_DC (fence), insn);
          VEC_unordered_remove (rtx, FENCE_EXECUTING_INSNS (fence), i);
          continue;
        }
      i++;
    }
  if (sched_verbose >= 2)
    {
      sel_print ("Finished a cycle.  Current cycle = %d\n", FENCE_CYCLE (fence));
      debug_state (FENCE_STATE (fence));
    }
}

/* Returns true when SUCC is in a fallthru bb of INSN, possibly
   skipping empty basic blocks.  */
static bool
in_fallthru_bb_p (rtx insn, rtx succ)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  edge e;

  if (bb == BLOCK_FOR_INSN (succ))
    return true;

  e = find_fallthru_edge_from (bb);
  if (e)
    bb = e->dest;
  else
    return false;

  while (sel_bb_empty_p (bb))
    bb = bb->next_bb;

  return bb == BLOCK_FOR_INSN (succ);
}

/* Construct successor fences from OLD_FENCEs and put them in NEW_FENCES.
   When a successor will continue an ebb, transfer all parameters of a fence
   to the new fence.  ORIG_MAX_SEQNO is the maximal seqno before this round
   of scheduling, helping to distinguish between the old and the new code.  */
static void
extract_new_fences_from (flist_t old_fences, flist_tail_t new_fences,
			 int orig_max_seqno)
{
  bool was_here_p = false;
  insn_t insn = NULL_RTX;
  insn_t succ;
  succ_iterator si;
  ilist_iterator ii;
  fence_t fence = FLIST_FENCE (old_fences);
  basic_block bb;

  /* Get the only element of FENCE_BNDS (fence).  */
  FOR_EACH_INSN (insn, ii, FENCE_BNDS (fence))
    {
      gcc_assert (!was_here_p);
      was_here_p = true;
    }
  gcc_assert (was_here_p && insn != NULL_RTX);

  /* When in the "middle" of the block, just move this fence
     to the new list.  */
  bb = BLOCK_FOR_INSN (insn);
  if (! sel_bb_end_p (insn)
      || (single_succ_p (bb)
          && single_pred_p (single_succ (bb))))
    {
      insn_t succ;

      succ = (sel_bb_end_p (insn)
              ? sel_bb_head (single_succ (bb))
              : NEXT_INSN (insn));

      if (INSN_SEQNO (succ) > 0
          && INSN_SEQNO (succ) <= orig_max_seqno
          && INSN_SCHED_TIMES (succ) <= 0)
        {
          FENCE_INSN (fence) = succ;
          move_fence_to_fences (old_fences, new_fences);

          if (sched_verbose >= 1)
            sel_print ("Fence %d continues as %d[%d] (state continue)\n",
                       INSN_UID (insn), INSN_UID (succ), BLOCK_NUM (succ));
        }
      return;
    }

  /* Otherwise copy fence's structures to (possibly) multiple successors.  */
  FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
    {
      int seqno = INSN_SEQNO (succ);

      if (0 < seqno && seqno <= orig_max_seqno
          && (pipelining_p || INSN_SCHED_TIMES (succ) <= 0))
        {
          bool b = (in_same_ebb_p (insn, succ)
                    || in_fallthru_bb_p (insn, succ));

          if (sched_verbose >= 1)
            sel_print ("Fence %d continues as %d[%d] (state %s)\n",
                       INSN_UID (insn), INSN_UID (succ),
                       BLOCK_NUM (succ), b ? "continue" : "reset");

          if (b)
            add_dirty_fence_to_fences (new_fences, succ, fence);
          else
            {
              /* Mark block of the SUCC as head of the new ebb.  */
              bitmap_set_bit (forced_ebb_heads, BLOCK_NUM (succ));
              add_clean_fence_to_fences (new_fences, succ, fence);
            }
        }
    }
}


/* Functions to support substitution.  */

/* Returns whether INSN with dependence status DS is eligible for
   substitution, i.e. it is a copy operation x := y, and the RHS that is
   moved up through this insn should be substituted.  */
static bool
can_substitute_through_p (insn_t insn, ds_t ds)
{
  /* We can substitute only through true dependencies.  */
  if ((ds & DEP_OUTPUT)
      || (ds & DEP_ANTI)
      || ! INSN_RHS (insn)
      || ! INSN_LHS (insn))
    return false;

  /* Now we just need to make sure the INSN_RHS consists of only one
     simple REG rtx.  */
  if (REG_P (INSN_LHS (insn))
      && REG_P (INSN_RHS (insn)))
    return true;
  return false;
}
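
/* For example, if INSN is "y := x" and the expr being moved up is
   "z := y + 1", substitution rewrites the expr into "z := x + 1", which
   can then be moved above the copy.  (An illustration only; the actual
   mechanics are in substitute_reg_in_expr below.)  */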

/* Substitute all occurrences of INSN's destination in EXPR's vinsn with INSN's
   source (if INSN is eligible for substitution).  Returns TRUE if
   substitution was actually performed, FALSE otherwise.  Substitution might
   not be performed because either EXPR's vinsn doesn't contain INSN's
   destination or the resulting insn is invalid for the target machine.
   When UNDO is true, perform unsubstitution instead (the difference is in
   the part of rtx on which validate_replace_rtx is called).  */
static bool
substitute_reg_in_expr (expr_t expr, insn_t insn, bool undo)
{
  rtx *where;
  bool new_insn_valid;
  vinsn_t *vi = &EXPR_VINSN (expr);
  bool has_rhs = VINSN_RHS (*vi) != NULL;
  rtx old, new_rtx;

  /* Do not try to replace in SET_DEST.  Although we'll choose a new
     register for the RHS, we don't want to change the RHS' original reg.
     If the insn is not SET, we may still be able to substitute something
     in it, and if we're here (don't have deps), it doesn't write INSN's
     dest.  */
  where = (has_rhs
	   ? &VINSN_RHS (*vi)
	   : &PATTERN (VINSN_INSN_RTX (*vi)));
  old = undo ? INSN_RHS (insn) : INSN_LHS (insn);

  /* Substitute if INSN has a form of x:=y and LHS(INSN) occurs in *VI.  */
  if (rtx_ok_for_substitution_p (old, *where))
    {
      rtx new_insn;
      rtx *where_replace;

      /* We should copy these rtxes before substitution.  */
      new_rtx = copy_rtx (undo ? INSN_LHS (insn) : INSN_RHS (insn));
      new_insn = create_copy_of_insn_rtx (VINSN_INSN_RTX (*vi));

      /* Where we'll replace.
         WHERE_REPLACE should point inside NEW_INSN, so INSN_RHS couldn't be
	 used instead of SET_SRC.  */
      where_replace = (has_rhs
		       ? &SET_SRC (PATTERN (new_insn))
		       : &PATTERN (new_insn));

      new_insn_valid
        = validate_replace_rtx_part_nosimplify (old, new_rtx, where_replace,
                                                new_insn);

      /* ??? Actually, the constrain_operands result depends upon the choice
         of destination register.  E.g. if we allow a single register to be
	 an rhs, and if we try to move dx=ax (as rhs) through ax=dx, we'll
	 end up with the invalid insn dx=dx, so we'll lose this rhs here.
	 Just can't come up with a significant testcase for this, so just
	 leaving it for now.  */
      if (new_insn_valid)
	{
	  change_vinsn_in_expr (expr,
				create_vinsn_from_insn_rtx (new_insn, false));

	  /* Do not allow clobbering the address register of speculative
             insns.  */
	  if ((EXPR_SPEC_DONE_DS (expr) & SPECULATIVE)
              && register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
					 expr_dest_reg (expr)))
	    EXPR_TARGET_AVAILABLE (expr) = false;

	  return true;
	}
      else
        return false;
    }
  else
    return false;
}

/* Helper function for count_occurrences_equiv.  */
static int
count_occurrences_1 (rtx *cur_rtx, void *arg)
{
  rtx_search_arg_p p = (rtx_search_arg_p) arg;

  if (REG_P (*cur_rtx) && REGNO (*cur_rtx) == REGNO (p->x))
    {
      /* Bail out if mode is different or more than one register is used.  */
      if (GET_MODE (*cur_rtx) != GET_MODE (p->x)
          || (HARD_REGISTER_P (*cur_rtx)
	      && hard_regno_nregs[REGNO(*cur_rtx)][GET_MODE (*cur_rtx)] > 1))
        {
          p->n = 0;
          return 1;
        }

      p->n++;

      /* Do not traverse subexprs.  */
      return -1;
    }

  if (GET_CODE (*cur_rtx) == SUBREG
      && (!REG_P (SUBREG_REG (*cur_rtx))
	  || REGNO (SUBREG_REG (*cur_rtx)) == REGNO (p->x)))
    {
      /* ??? Do not support substituting regs inside subregs.  In that case,
         simplify_subreg will be called by validate_replace_rtx, and
         unsubstitution will fail later.  */
      p->n = 0;
      return 1;
    }

  /* Continue search.  */
  return 0;
}

/* Return the number of places WHAT appears within WHERE.
   Bail out when we find a reference occupying several hard registers.  */
static int
count_occurrences_equiv (rtx what, rtx where)
{
  struct rtx_search_arg arg;

  gcc_assert (REG_P (what));
  arg.x = what;
  arg.n = 0;

  for_each_rtx (&where, &count_occurrences_1, (void *) &arg);

  return arg.n;
}

/* Returns TRUE if WHAT is found in the WHERE rtx tree.  */
static bool
rtx_ok_for_substitution_p (rtx what, rtx where)
{
  return (count_occurrences_equiv (what, where) > 0);
}


/* Functions to support register renaming.  */

/* Substitute VI's set source with RHS_RTX.  Returns the newly created insn
   that has RHS_RTX as its source.  */
static rtx
create_insn_rtx_with_rhs (vinsn_t vi, rtx rhs_rtx)
{
  rtx lhs_rtx;
  rtx pattern;
  rtx insn_rtx;

  lhs_rtx = copy_rtx (VINSN_LHS (vi));

  pattern = gen_rtx_SET (VOIDmode, lhs_rtx, rhs_rtx);
  insn_rtx = create_insn_rtx_from_pattern (pattern, NULL_RTX);

  return insn_rtx;
}

/* Returns whether INSN's src can be replaced with register
   NEW_SRC_REG.  E.g. the following insn is valid for i386:

    (insn:HI 2205 6585 2207 727 ../../gcc/libiberty/regex.c:3337
      (set (mem/s:QI (plus:SI (plus:SI (reg/f:SI 7 sp)
			(reg:SI 0 ax [orig:770 c1 ] [770]))
		    (const_int 288 [0x120])) [0 str S1 A8])
	    (const_int 0 [0x0])) 43 {*movqi_1} (nil)
	(nil))

  But if we change (const_int 0 [0x0]) to (reg:QI 4 si), it will be invalid
  because of operand constraints:

    (define_insn "*movqi_1"
      [(set (match_operand:QI 0 "nonimmediate_operand" "=q,q ,q ,r,r ,?r,m")
	    (match_operand:QI 1 "general_operand"      " q,qn,qm,q,rn,qm,qn")
	    )]

  So do constrain_operands here, before choosing NEW_SRC_REG as the best
  reg for the rhs.  */

static bool
replace_src_with_reg_ok_p (insn_t insn, rtx new_src_reg)
{
  vinsn_t vi = INSN_VINSN (insn);
  enum machine_mode mode;
  rtx dst_loc;
  bool res;

  gcc_assert (VINSN_SEPARABLE_P (vi));

  get_dest_and_mode (insn, &dst_loc, &mode);
  gcc_assert (mode == GET_MODE (new_src_reg));

  if (REG_P (dst_loc) && REGNO (new_src_reg) == REGNO (dst_loc))
    return true;

  /* See whether SET_SRC can be replaced with this register.  */
  validate_change (insn, &SET_SRC (PATTERN (insn)), new_src_reg, 1);
  res = verify_changes (0);
  cancel_changes (0);

  return res;
}

/* Returns whether INSN is still valid after replacing its DEST with
   register NEW_REG.  */
static bool
replace_dest_with_reg_ok_p (insn_t insn, rtx new_reg)
{
  vinsn_t vi = INSN_VINSN (insn);
  bool res;

  /* We should deal here only with separable insns.  */
  gcc_assert (VINSN_SEPARABLE_P (vi));
  gcc_assert (GET_MODE (VINSN_LHS (vi)) == GET_MODE (new_reg));

  /* See whether SET_DEST can be replaced with this register.  */
  validate_change (insn, &SET_DEST (PATTERN (insn)), new_reg, 1);
  res = verify_changes (0);
  cancel_changes (0);

  return res;
}

/* Create a pattern with rhs of VI and lhs of LHS_RTX.  */
static rtx
create_insn_rtx_with_lhs (vinsn_t vi, rtx lhs_rtx)
{
  rtx rhs_rtx;
  rtx pattern;
  rtx insn_rtx;

  rhs_rtx = copy_rtx (VINSN_RHS (vi));

  pattern = gen_rtx_SET (VOIDmode, lhs_rtx, rhs_rtx);
  insn_rtx = create_insn_rtx_from_pattern (pattern, NULL_RTX);

  return insn_rtx;
}

/* Substitute lhs in the given expression EXPR with the register NEW_REG.
   The original SET_DEST may be an arbitrary rtx, not only a register.  */
static void
replace_dest_with_reg_in_expr (expr_t expr, rtx new_reg)
{
  rtx insn_rtx;
  vinsn_t vinsn;

  insn_rtx = create_insn_rtx_with_lhs (EXPR_VINSN (expr), new_reg);
  vinsn = create_vinsn_from_insn_rtx (insn_rtx, false);

  change_vinsn_in_expr (expr, vinsn);
  EXPR_WAS_RENAMED (expr) = 1;
  EXPR_TARGET_AVAILABLE (expr) = 1;
}

/* Returns whether VI writes either one of the USED_REGS registers or,
   if a register is a hard one, one of the UNAVAILABLE_HARD_REGS registers.  */
static bool
vinsn_writes_one_of_regs_p (vinsn_t vi, regset used_regs,
                            HARD_REG_SET unavailable_hard_regs)
{
  unsigned regno;
  reg_set_iterator rsi;

  EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (vi), 0, regno, rsi)
    {
      if (REGNO_REG_SET_P (used_regs, regno))
        return true;
      if (HARD_REGISTER_NUM_P (regno)
          && TEST_HARD_REG_BIT (unavailable_hard_regs, regno))
	return true;
    }

  EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (vi), 0, regno, rsi)
    {
      if (REGNO_REG_SET_P (used_regs, regno))
        return true;
      if (HARD_REGISTER_NUM_P (regno)
          && TEST_HARD_REG_BIT (unavailable_hard_regs, regno))
	return true;
    }

  return false;
}

/* Returns the register class of the output register in INSN.
   Returns NO_REGS for call insns because some targets have constraints on
   the destination register of a call insn.

   Code adopted from regrename.c::build_def_use.  */
static enum reg_class
get_reg_class (rtx insn)
{
  int alt, i, n_ops;

  extract_insn (insn);
  if (! constrain_operands (1))
    fatal_insn_not_found (insn);
  preprocess_constraints ();
  alt = which_alternative;
  n_ops = recog_data.n_operands;

  for (i = 0; i < n_ops; ++i)
    {
      int matches = recog_op_alt[i][alt].matches;
      if (matches >= 0)
	recog_op_alt[i][alt].cl = recog_op_alt[matches][alt].cl;
    }

  if (asm_noperands (PATTERN (insn)) > 0)
    {
      for (i = 0; i < n_ops; i++)
	if (recog_data.operand_type[i] == OP_OUT)
	  {
	    rtx *loc = recog_data.operand_loc[i];
	    rtx op = *loc;
	    enum reg_class cl = recog_op_alt[i][alt].cl;

	    if (REG_P (op)
		&& REGNO (op) == ORIGINAL_REGNO (op))
	      continue;

	    return cl;
	  }
    }
  else if (!CALL_P (insn))
    {
      for (i = 0; i < n_ops + recog_data.n_dups; i++)
       {
	 int opn = i < n_ops ? i : recog_data.dup_num[i - n_ops];
	 enum reg_class cl = recog_op_alt[opn][alt].cl;

	 if (recog_data.operand_type[opn] == OP_OUT ||
	     recog_data.operand_type[opn] == OP_INOUT)
	   return cl;
       }
    }

/*  Insns like
    (insn (set (reg:CCZ 17 flags) (compare:CCZ ...)))
    may result in returning NO_REGS, because flags are written implicitly
    through the CMP insn, which has no OP_OUT | OP_INOUT operands.  */
  return NO_REGS;
}

#ifdef HARD_REGNO_RENAME_OK
/* Calculate HARD_REGNO_RENAME_OK data for REGNO.  */
static void
init_hard_regno_rename (int regno)
{
  int cur_reg;

  SET_HARD_REG_BIT (sel_hrd.regs_for_rename[regno], regno);

  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    {
      /* We are not interested in renaming into regs that are never used.  */
      if (!TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg))
        continue;

      if (HARD_REGNO_RENAME_OK (regno, cur_reg))
        SET_HARD_REG_BIT (sel_hrd.regs_for_rename[regno], cur_reg);
    }
}
#endif

/* A wrapper around HARD_REGNO_RENAME_OK that will look into the hard regs
   data first.  */
static inline bool
sel_hard_regno_rename_ok (int from ATTRIBUTE_UNUSED, int to ATTRIBUTE_UNUSED)
{
#ifdef HARD_REGNO_RENAME_OK
  /* Check whether this is all calculated.  */
  if (TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], from))
    return TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], to);

  init_hard_regno_rename (from);

  return TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], to);
#else
  return true;
#endif
}

/* Calculate the set of registers that are capable of holding MODE.  */
static void
init_regs_for_mode (enum machine_mode mode)
{
  int cur_reg;

  CLEAR_HARD_REG_SET (sel_hrd.regs_for_mode[mode]);
  CLEAR_HARD_REG_SET (sel_hrd.regs_for_call_clobbered[mode]);

  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    {
      int nregs = hard_regno_nregs[cur_reg][mode];
      int i;

      for (i = nregs - 1; i >= 0; --i)
        if (fixed_regs[cur_reg + i]
                || global_regs[cur_reg + i]
            /* Can't use regs which aren't saved by
               the prologue.  */
            || !TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg + i)
	    /* Can't use regs with non-null REG_BASE_VALUE, because adjusting
	       it affects aliasing globally and invalidates all AV sets.  */
	    || get_reg_base_value (cur_reg + i)
#ifdef LEAF_REGISTERS
            /* We can't use a non-leaf register if we're in a
               leaf function.  */
            || (current_function_is_leaf
                && !LEAF_REGISTERS[cur_reg + i])
#endif
            )
          break;

      if (i >= 0)
        continue;

      /* See whether it accepts all modes that occur in
         original insns.  */
      if (! HARD_REGNO_MODE_OK (cur_reg, mode))
        continue;

      if (HARD_REGNO_CALL_PART_CLOBBERED (cur_reg, mode))
        SET_HARD_REG_BIT (sel_hrd.regs_for_call_clobbered[mode],
                          cur_reg);

      /* If the CUR_REG passed all the checks above,
         then it's ok.  */
      SET_HARD_REG_BIT (sel_hrd.regs_for_mode[mode], cur_reg);
    }

  sel_hrd.regs_for_mode_ok[mode] = true;
}

/* Init all register sets gathered in HRD.  */
static void
init_hard_regs_data (void)
{
  int cur_reg = 0;
  int cur_mode = 0;

  CLEAR_HARD_REG_SET (sel_hrd.regs_ever_used);
  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    if (df_regs_ever_live_p (cur_reg) || call_used_regs[cur_reg])
      SET_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg);

  /* Initialize registers that are valid based on mode when this is
     really needed.  */
  for (cur_mode = 0; cur_mode < NUM_MACHINE_MODES; cur_mode++)
    sel_hrd.regs_for_mode_ok[cur_mode] = false;

  /* Mark that all HARD_REGNO_RENAME_OK data is not calculated.  */
  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    CLEAR_HARD_REG_SET (sel_hrd.regs_for_rename[cur_reg]);

#ifdef STACK_REGS
  CLEAR_HARD_REG_SET (sel_hrd.stack_regs);

  for (cur_reg = FIRST_STACK_REG; cur_reg <= LAST_STACK_REG; cur_reg++)
    SET_HARD_REG_BIT (sel_hrd.stack_regs, cur_reg);
#endif
}

/* Mark hardware regs in REG_RENAME_P that are not suitable
   for renaming the rhs in INSN due to hardware restrictions (register class,
   modes compatibility etc).  This doesn't affect the original insn's dest reg,
   if it isn't in USED_REGS.  DEF is a definition insn of the rhs for which the
   destination register is sought.  LHS (DEF->ORIG_INSN) may be REG or MEM.
   Registers that are in used_regs are always marked in
   unavailable_hard_regs as well.  */

static void
mark_unavailable_hard_regs (def_t def, struct reg_rename *reg_rename_p,
                            regset used_regs ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  enum reg_class cl = NO_REGS;
  rtx orig_dest;
  unsigned cur_reg, regno;
  hard_reg_set_iterator hrsi;

  gcc_assert (GET_CODE (PATTERN (def->orig_insn)) == SET);
  gcc_assert (reg_rename_p);

  orig_dest = SET_DEST (PATTERN (def->orig_insn));

  /* We have decided not to rename 'mem = something;' insns, as 'something'
     is usually a register.  */
  if (!REG_P (orig_dest))
    return;

  regno = REGNO (orig_dest);

  /* If before reload, don't try to work with pseudos.  */
  if (!reload_completed && !HARD_REGISTER_NUM_P (regno))
    return;

  if (reload_completed)
    cl = get_reg_class (def->orig_insn);

  /* Stop if the original register is one of the fixed_regs, global_regs or
     frame pointer, or we could not discover its class.  */
  if (fixed_regs[regno]
      || global_regs[regno]
#if !HARD_FRAME_POINTER_IS_FRAME_POINTER
      || (frame_pointer_needed && regno == HARD_FRAME_POINTER_REGNUM)
#else
      || (frame_pointer_needed && regno == FRAME_POINTER_REGNUM)
#endif
      || (reload_completed && cl == NO_REGS))
    {
      SET_HARD_REG_SET (reg_rename_p->unavailable_hard_regs);

      /* Give a chance for the original register, if it isn't in used_regs.  */
      if (!def->crosses_call)
        CLEAR_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, regno);

      return;
    }

  /* If something is allocated on the stack in this function, mark the frame
     pointer register unavailable, considering also modes.
     FIXME: it is enough to do this once per all original defs.  */
  if (frame_pointer_needed)
    {
      add_to_hard_reg_set (&reg_rename_p->unavailable_hard_regs,
			   Pmode, FRAME_POINTER_REGNUM);

      if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
        add_to_hard_reg_set (&reg_rename_p->unavailable_hard_regs,
			     Pmode, HARD_FRAME_POINTER_REGNUM);
    }

#ifdef STACK_REGS
  /* For the stack registers, the presence of FIRST_STACK_REG in USED_REGS
     is equivalent to all stack regs being in this set.
     I.e. no stack register can be renamed, and even if it's an original
     register here we make sure it won't be lifted over its previous def
     (its previous def will appear as if it were a FIRST_STACK_REG def).
     HARD_REGNO_RENAME_OK covers other cases in the condition below.  */
  if (IN_RANGE (REGNO (orig_dest), FIRST_STACK_REG, LAST_STACK_REG)
      && REGNO_REG_SET_P (used_regs, FIRST_STACK_REG))
    IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs,
                      sel_hrd.stack_regs);
#endif

  /* If there's a call on this path, make regs from call_used_reg_set
     unavailable.  */
  if (def->crosses_call)
    IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs,
                      call_used_reg_set);

  /* Stop here before reload: we need FRAME_REGS, STACK_REGS, and crosses_call,
     but not register classes.  */
  if (!reload_completed)
    return;

  /* Leave regs as 'available' only from the current
     register class.  */
  COPY_HARD_REG_SET (reg_rename_p->available_for_renaming,
                     reg_class_contents[cl]);

  mode = GET_MODE (orig_dest);

  /* Leave only registers available for this mode.  */
  if (!sel_hrd.regs_for_mode_ok[mode])
    init_regs_for_mode (mode);
  AND_HARD_REG_SET (reg_rename_p->available_for_renaming,
                    sel_hrd.regs_for_mode[mode]);

  /* Exclude registers that are partially call clobbered.  */
  if (def->crosses_call
      && ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode))
    AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming,
                            sel_hrd.regs_for_call_clobbered[mode]);

  /* Leave only those that are ok to rename.  */
  EXECUTE_IF_SET_IN_HARD_REG_SET (reg_rename_p->available_for_renaming,
                                  0, cur_reg, hrsi)
    {
      int nregs;
      int i;

      nregs = hard_regno_nregs[cur_reg][mode];
      gcc_assert (nregs > 0);

      for (i = nregs - 1; i >= 0; --i)
        if (! sel_hard_regno_rename_ok (regno + i, cur_reg + i))
          break;

      if (i >= 0)
        CLEAR_HARD_REG_BIT (reg_rename_p->available_for_renaming,
                            cur_reg);
    }

  AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming,
                          reg_rename_p->unavailable_hard_regs);

  /* Regno is always ok from the renaming point of view, but it really
     could be in *unavailable_hard_regs already, so set it here instead
     of there.  */
  SET_HARD_REG_BIT (reg_rename_p->available_for_renaming, regno);
}

/* reg_rename_tick[REG1] > reg_rename_tick[REG2] if REG1 was chosen as the
   best register more recently than REG2.  */
static int reg_rename_tick[FIRST_PSEUDO_REGISTER];

/* Indicates the number of times renaming happened before the current one.  */
static int reg_rename_this_tick;

/* Choose the register among free ones that is suitable for storing
   the rhs value.

   ORIGINAL_INSNS is the list of insns where the operation (rhs)
   originally appears.  There could be multiple original operations
   for a single rhs since we move it up and merge along different
   paths.

   Some code is adapted from regrename.c (regrename_optimize).
   If the original register is available, the function returns it.
   Otherwise it performs the checks, so the new register should
   comply with the following:
    - it should not violate any live ranges (such registers are in
      the REG_RENAME_P->available_for_renaming set);
    - it should not be in the HARD_REGS_USED regset;
    - it should be in the class compatible with original uses;
    - it should not be clobbered through a reference with a different mode;
    - if we're in a leaf function, then the new register should
      not be in the LEAF_REGISTERS;
    - etc.

   If several registers meet the conditions, the register with the smallest
   tick is returned to achieve more even register allocation.

   If the original register seems to be ok, we set *IS_ORIG_REG_P_PTR to true.

   If no register satisfies the above conditions, NULL_RTX is returned.  */
static rtx
choose_best_reg_1 (HARD_REG_SET hard_regs_used,
                   struct reg_rename *reg_rename_p,
                   def_list_t original_insns, bool *is_orig_reg_p_ptr)
{
  int best_new_reg;
  unsigned cur_reg;
  enum machine_mode mode = VOIDmode;
  unsigned regno, i, n;
  hard_reg_set_iterator hrsi;
  def_list_iterator di;
  def_t def;

  /* If the original register is available, return it.  */
  *is_orig_reg_p_ptr = true;

  FOR_EACH_DEF (def, di, original_insns)
    {
      rtx orig_dest = SET_DEST (PATTERN (def->orig_insn));

      gcc_assert (REG_P (orig_dest));

      /* Check that all original operations have the same mode.
         This is done for the next loop; if we'd return from this
         loop, we'd check only part of them, but in this case
         it doesn't matter.  */
      if (mode == VOIDmode)
        mode = GET_MODE (orig_dest);
      gcc_assert (mode == GET_MODE (orig_dest));

      regno = REGNO (orig_dest);
      for (i = 0, n = hard_regno_nregs[regno][mode]; i < n; i++)
        if (TEST_HARD_REG_BIT (hard_regs_used, regno + i))
          break;

      /* All hard registers are available.  */
      if (i == n)
        {
          gcc_assert (mode != VOIDmode);

          /* Hard registers should not be shared.  */
          return gen_rtx_REG (mode, regno);
        }
    }

  *is_orig_reg_p_ptr = false;
  best_new_reg = -1;

  /* Among all available regs choose the register that was
     allocated earliest.  */
  EXECUTE_IF_SET_IN_HARD_REG_SET (reg_rename_p->available_for_renaming,
                                  0, cur_reg, hrsi)
    if (! TEST_HARD_REG_BIT (hard_regs_used, cur_reg))
      {
	/* Check that all hard regs for mode are available.  */
	for (i = 1, n = hard_regno_nregs[cur_reg][mode]; i < n; i++)
	  if (TEST_HARD_REG_BIT (hard_regs_used, cur_reg + i)
	      || !TEST_HARD_REG_BIT (reg_rename_p->available_for_renaming,
				     cur_reg + i))
	    break;

	if (i < n)
	  continue;

        /* All hard registers are available.  */
        if (best_new_reg < 0
            || reg_rename_tick[cur_reg] < reg_rename_tick[best_new_reg])
          {
            best_new_reg = cur_reg;

            /* Return immediately when we know there's no better reg.  */
            if (! reg_rename_tick[best_new_reg])
              break;
          }
      }

  if (best_new_reg >= 0)
    {
      /* Use the check from the above loop.  */
      gcc_assert (mode != VOIDmode);
      return gen_rtx_REG (mode, best_new_reg);
    }

  return NULL_RTX;
}

/* A wrapper around choose_best_reg_1 () to verify that we make correct
   assumptions about available registers in the function.  */
static rtx
choose_best_reg (HARD_REG_SET hard_regs_used, struct reg_rename *reg_rename_p,
                 def_list_t original_insns, bool *is_orig_reg_p_ptr)
{
  rtx best_reg = choose_best_reg_1 (hard_regs_used, reg_rename_p,
                                    original_insns, is_orig_reg_p_ptr);

  /* FIXME loop over hard_regno_nregs here.  */
  gcc_assert (best_reg == NULL_RTX
	      || TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, REGNO (best_reg)));

  return best_reg;
}

1476 /* Choose the pseudo register for storing rhs value.  As this is supposed
1477    to work before reload, we return either the original register or create
1478    a new one.  The parameters are the same as in the choose_best_reg_1
1479    function, except that USED_REGS may contain pseudos.
1480    If we work with hard regs, check also REG_RENAME_P->UNAVAILABLE_HARD_REGS.
1481 
1482    TODO: take into account register pressure while doing this.  So far,
1483    this function has never returned NULL for pseudos, but we should
1484    not rely on this.  */
1485 static rtx
1486 choose_best_pseudo_reg (regset used_regs,
1487                         struct reg_rename *reg_rename_p,
1488                         def_list_t original_insns, bool *is_orig_reg_p_ptr)
1489 {
1490   def_list_iterator i;
1491   def_t def;
1492   enum machine_mode mode = VOIDmode;
1493   bool bad_hard_regs = false;
1494 
1495   /* We should not use this after reload.  */
1496   gcc_assert (!reload_completed);
1497 
1498   /* If original register is available, return it.  */
1499   *is_orig_reg_p_ptr = true;
1500 
1501   FOR_EACH_DEF (def, i, original_insns)
1502     {
1503       rtx dest = SET_DEST (PATTERN (def->orig_insn));
1504       int orig_regno;
1505 
1506       gcc_assert (REG_P (dest));
1507 
1508       /* Check that all original operations have the same mode.  */
1509       if (mode == VOIDmode)
1510         mode = GET_MODE (dest);
1511       else
1512         gcc_assert (mode == GET_MODE (dest));
1513       orig_regno = REGNO (dest);
1514 
1515       if (!REGNO_REG_SET_P (used_regs, orig_regno))
1516         {
1517           if (orig_regno < FIRST_PSEUDO_REGISTER)
1518             {
1519               gcc_assert (df_regs_ever_live_p (orig_regno));
1520 
1521               /* For hard registers, we have to check hardware imposed
1522                  limitations (frame/stack registers, calls crossed).  */
1523               if (!TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs,
1524                                       orig_regno))
1525 		{
1526 		  /* Don't let the register cross a call if it doesn't already
1527 		     cross one.  This condition is written in accordance with
1528 		     that in sched-deps.c sched_analyze_reg().  */
1529 		  if (!reg_rename_p->crosses_call
1530 		      || REG_N_CALLS_CROSSED (orig_regno) > 0)
1531 		    return gen_rtx_REG (mode, orig_regno);
1532 		}
1533 
1534               bad_hard_regs = true;
1535             }
1536           else
1537             return dest;
1538         }
1539      }
1540 
1541   *is_orig_reg_p_ptr = false;
1542 
1543   /* We had some original hard registers that couldn't be used.
1544      Those were likely special.  Don't try to create a pseudo.  */
1545   if (bad_hard_regs)
1546     return NULL_RTX;
1547 
1548   /* We haven't found a register from original operations.  Get a new one.
1549      FIXME: control register pressure somehow.  */
1550   {
1551     rtx new_reg = gen_reg_rtx (mode);
1552 
1553     gcc_assert (mode != VOIDmode);
1554 
1555     max_regno = max_reg_num ();
1556     maybe_extend_reg_info_p ();
1557     REG_N_CALLS_CROSSED (REGNO (new_reg)) = reg_rename_p->crosses_call ? 1 : 0;
1558 
1559     return new_reg;
1560   }
1561 }
1562 
1563 /* Verify (via assertions) that the target of EXPR is available consistently
1564    with EXPR_TARGET_AVAILABLE, USED_REGS and REG_RENAME_P->UNAVAILABLE_HARD_REGS.  */
1565 static void
1566 verify_target_availability (expr_t expr, regset used_regs,
1567 			    struct reg_rename *reg_rename_p)
1568 {
1569   unsigned n, i, regno;
1570   enum machine_mode mode;
1571   bool target_available, live_available, hard_available;
1572 
1573   if (!REG_P (EXPR_LHS (expr)) || EXPR_TARGET_AVAILABLE (expr) < 0)
1574     return;
1575 
1576   regno = expr_dest_regno (expr);
1577   mode = GET_MODE (EXPR_LHS (expr));
1578   target_available = EXPR_TARGET_AVAILABLE (expr) == 1;
1579   n = HARD_REGISTER_NUM_P (regno) ? hard_regno_nregs[regno][mode] : 1;
1580 
1581   live_available = hard_available = true;
1582   for (i = 0; i < n; i++)
1583     {
1584       if (bitmap_bit_p (used_regs, regno + i))
1585         live_available = false;
1586       if (TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, regno + i))
1587         hard_available = false;
1588     }
1589 
1590   /* When target is not available, it may be due to hard register
1591      restrictions, e.g. crosses calls, so we check hard_available too.  */
1592   if (target_available)
1593     gcc_assert (live_available);
1594   else
1595     /* Check only if we haven't scheduled something on the previous fence,
1596        because due to MAX_SOFTWARE_LOOKAHEAD_WINDOW_SIZE issues and having
1597        more than one fence, we may end up with an unavailable target in a
1598        block in which the successor's target register is actually available.
1599 
1600        The last condition handles the case when a dependence from a call insn
1601        was created in sched-deps.c for insns with destination registers that
1602        never crossed a call before, but do cross one after our code motion.
1603 
1604        FIXME: in the latter case, we just uselessly called find_used_regs,
1605        because we can't move this expression with any other register
1606        as well.  */
1607     gcc_assert (scheduled_something_on_previous_fence || !live_available
1608 		|| !hard_available
1609 		|| (!reload_completed && reg_rename_p->crosses_call
1610 		    && REG_N_CALLS_CROSSED (regno) == 0));
1611 }
1612 
1613 /* Collect unavailable registers due to liveness for EXPR from BNDS
1614    into USED_REGS.  Save additional information about available
1615    registers and unavailable due to hardware restriction registers
1616    into REG_RENAME_P structure.  Save original insns into ORIGINAL_INSNS
1617    list.  */
1618 static void
1619 collect_unavailable_regs_from_bnds (expr_t expr, blist_t bnds, regset used_regs,
1620 				    struct reg_rename *reg_rename_p,
1621 				    def_list_t *original_insns)
1622 {
1623   for (; bnds; bnds = BLIST_NEXT (bnds))
1624     {
1625       bool res;
1626       av_set_t orig_ops = NULL;
1627       bnd_t bnd = BLIST_BND (bnds);
1628 
1629       /* If the chosen best expr doesn't belong to current boundary,
1630 	 skip it.  */
1631       if (!av_set_is_in_p (BND_AV1 (bnd), EXPR_VINSN (expr)))
1632 	continue;
1633 
1634       /* Put in ORIG_OPS all exprs from this boundary that became
1635 	 RES on top.  */
1636       orig_ops = find_sequential_best_exprs (bnd, expr, false);
1637 
1638       /* Compute used regs and OR it into the USED_REGS.  */
1639       res = find_used_regs (BND_TO (bnd), orig_ops, used_regs,
1640 			    reg_rename_p, original_insns);
1641 
1642       /* FIXME: the assert holds until we support several boundaries.  */
1643       gcc_assert (res);
1644       av_set_clear (&orig_ops);
1645     }
1646 }
1647 
1648 /* Return TRUE if it is possible to replace LHSes of ORIG_INSNS with BEST_REG.
1649    If BEST_REG is valid, replace LHS of EXPR with it.  */
1650 static bool
1651 try_replace_dest_reg (ilist_t orig_insns, rtx best_reg, expr_t expr)
1652 {
1653   /* Try whether we'll be able to generate the insn
1654      'dest := best_reg' at the place of the original operation.  */
1655   for (; orig_insns; orig_insns = ILIST_NEXT (orig_insns))
1656     {
1657       insn_t orig_insn = DEF_LIST_DEF (orig_insns)->orig_insn;
1658 
1659       gcc_assert (EXPR_SEPARABLE_P (INSN_EXPR (orig_insn)));
1660 
1661       if (REGNO (best_reg) != REGNO (INSN_LHS (orig_insn))
1662 	  && (! replace_src_with_reg_ok_p (orig_insn, best_reg)
1663 	      || ! replace_dest_with_reg_ok_p (orig_insn, best_reg)))
1664 	return false;
1665     }
1666 
1667   /* Make sure that EXPR has the right destination
1668      register.  */
1669   if (expr_dest_regno (expr) != REGNO (best_reg))
1670     replace_dest_with_reg_in_expr (expr, best_reg);
1671   else
1672     EXPR_TARGET_AVAILABLE (expr) = 1;
1673 
1674   return true;
1675 }
1676 
1677 /* Select and assign best register to EXPR searching from BNDS.
1678    Set *IS_ORIG_REG_P to TRUE if original register was selected.
1679    Return FALSE if no register can be chosen, which could happen when:
1680    * EXPR_SEPARABLE_P is true but we were unable to find suitable register;
1681    * EXPR_SEPARABLE_P is false but the insn sets/clobbers one of the registers
1682      that are used on the moving path.  */
1683 static bool
1684 find_best_reg_for_expr (expr_t expr, blist_t bnds, bool *is_orig_reg_p)
1685 {
1686   static struct reg_rename reg_rename_data;
1687 
1688   regset used_regs;
1689   def_list_t original_insns = NULL;
1690   bool reg_ok;
1691 
1692   *is_orig_reg_p = false;
1693 
1694   /* Don't bother to do anything if this insn doesn't set any registers.  */
1695   if (bitmap_empty_p (VINSN_REG_SETS (EXPR_VINSN (expr)))
1696       && bitmap_empty_p (VINSN_REG_CLOBBERS (EXPR_VINSN (expr))))
1697     return true;
1698 
1699   used_regs = get_clear_regset_from_pool ();
1700   CLEAR_HARD_REG_SET (reg_rename_data.unavailable_hard_regs);
1701 
1702   collect_unavailable_regs_from_bnds (expr, bnds, used_regs, &reg_rename_data,
1703 				      &original_insns);
1704 
1705 #ifdef ENABLE_CHECKING
1706   /* If after reload, make sure we're working with hard regs here.  */
1707   if (reload_completed)
1708     {
1709       reg_set_iterator rsi;
1710       unsigned i;
1711 
1712       EXECUTE_IF_SET_IN_REG_SET (used_regs, FIRST_PSEUDO_REGISTER, i, rsi)
1713         gcc_unreachable ();
1714     }
1715 #endif
1716 
1717   if (EXPR_SEPARABLE_P (expr))
1718     {
1719       rtx best_reg = NULL_RTX;
1720       /* Check that we have computed availability of a target register
1721 	 correctly.  */
1722       verify_target_availability (expr, used_regs, &reg_rename_data);
1723 
1724       /* Turn everything into hard regs after reload.  */
1725       if (reload_completed)
1726 	{
1727 	  HARD_REG_SET hard_regs_used;
1728 	  REG_SET_TO_HARD_REG_SET (hard_regs_used, used_regs);
1729 
1730 	  /* Join hard registers unavailable due to register class
1731 	     restrictions and live range intersection.  */
1732 	  IOR_HARD_REG_SET (hard_regs_used,
1733 			    reg_rename_data.unavailable_hard_regs);
1734 
1735 	  best_reg = choose_best_reg (hard_regs_used, &reg_rename_data,
1736 				      original_insns, is_orig_reg_p);
1737 	}
1738       else
1739 	best_reg = choose_best_pseudo_reg (used_regs, &reg_rename_data,
1740 					   original_insns, is_orig_reg_p);
1741 
1742       if (!best_reg)
1743 	reg_ok = false;
1744       else if (*is_orig_reg_p)
1745 	{
1746 	  /* In case of unification BEST_REG may be different from EXPR's LHS
1747 	     when EXPR's LHS is unavailable, and there is another LHS among
1748 	     ORIGINAL_INSNS.  */
1749 	  reg_ok = try_replace_dest_reg (original_insns, best_reg, expr);
1750 	}
1751       else
1752 	{
1753 	  /* Forbid renaming of low-cost insns.  */
1754 	  if (sel_vinsn_cost (EXPR_VINSN (expr)) < 2)
1755 	    reg_ok = false;
1756 	  else
1757 	    reg_ok = try_replace_dest_reg (original_insns, best_reg, expr);
1758 	}
1759     }
1760   else
1761     {
1762       /* If !EXPR_SCHEDULE_AS_RHS (EXPR), just make sure INSN doesn't set
1763 	 any of the HARD_REGS_USED set.  */
1764       if (vinsn_writes_one_of_regs_p (EXPR_VINSN (expr), used_regs,
1765 				      reg_rename_data.unavailable_hard_regs))
1766 	{
1767 	  reg_ok = false;
1768 	  gcc_assert (EXPR_TARGET_AVAILABLE (expr) <= 0);
1769 	}
1770       else
1771 	{
1772 	  reg_ok = true;
1773 	  gcc_assert (EXPR_TARGET_AVAILABLE (expr) != 0);
1774 	}
1775     }
1776 
1777   ilist_clear (&original_insns);
1778   return_regset_to_pool (used_regs);
1779 
1780   return reg_ok;
1781 }
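
/* Editor's note: in outline, find_best_reg_for_expr decides as follows
   (a documentation aid only, mirroring the code above):

     - the insn sets/clobbers no registers -> true, nothing to choose;
     - EXPR is separable, after reload     -> choose_best_reg on the union of
                                              live and unavailable hard regs;
     - EXPR is separable, before reload    -> choose_best_pseudo_reg;
     - EXPR is not separable               -> true iff the insn writes none of
                                              the used/unavailable registers.  */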
1782 
1783 
1784 /* Return true if the dependence described by DS can be overcome.  */
1785 static bool
1786 can_speculate_dep_p (ds_t ds)
1787 {
1788   if (spec_info == NULL)
1789     return false;
1790 
1791   /* Leave only speculative data.  */
1792   ds &= SPECULATIVE;
1793 
1794   if (ds == 0)
1795     return false;
1796 
1797   {
1798     /* FIXME: make sched-deps.c produce only those non-hard dependencies,
1799        that we can overcome.  */
1800     ds_t spec_mask = spec_info->mask;
1801 
1802     if ((ds & spec_mask) != ds)
1803       return false;
1804   }
1805 
1806   if (ds_weak (ds) < spec_info->data_weakness_cutoff)
1807     return false;
1808 
1809   return true;
1810 }
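
/* Editor's example (assuming the usual ds_t encoding from sched-int.h):
   a status combining a hard dependence bit with a speculative one, e.g.
   DEP_TRUE | BEGIN_DATA, is first masked with SPECULATIVE, leaving only
   BEGIN_DATA; it is then accepted only when spec_info->mask permits that
   kind of speculation and ds_weak does not fall below
   data_weakness_cutoff.  */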
1811 
1812 /* Get a speculation check instruction.
1813    C_EXPR is a speculative expression,
1814    CHECK_DS describes speculations that should be checked,
1815    ORIG_INSN is the original non-speculative insn in the stream.  */
1816 static insn_t
1817 create_speculation_check (expr_t c_expr, ds_t check_ds, insn_t orig_insn)
1818 {
1819   rtx check_pattern;
1820   rtx insn_rtx;
1821   insn_t insn;
1822   basic_block recovery_block;
1823   rtx label;
1824 
1825   /* Create a recovery block if target is going to emit branchy check, or if
1826      ORIG_INSN was speculative already.  */
1827   if (targetm.sched.needs_block_p (check_ds)
1828       || EXPR_SPEC_DONE_DS (INSN_EXPR (orig_insn)) != 0)
1829     {
1830       recovery_block = sel_create_recovery_block (orig_insn);
1831       label = BB_HEAD (recovery_block);
1832     }
1833   else
1834     {
1835       recovery_block = NULL;
1836       label = NULL_RTX;
1837     }
1838 
1839   /* Get pattern of the check.  */
1840   check_pattern = targetm.sched.gen_spec_check (EXPR_INSN_RTX (c_expr), label,
1841 						check_ds);
1842 
1843   gcc_assert (check_pattern != NULL);
1844 
1845   /* Emit check.  */
1846   insn_rtx = create_insn_rtx_from_pattern (check_pattern, label);
1847 
1848   insn = sel_gen_insn_from_rtx_after (insn_rtx, INSN_EXPR (orig_insn),
1849 				      INSN_SEQNO (orig_insn), orig_insn);
1850 
1851   /* Make check to be non-speculative.  */
1852   EXPR_SPEC_DONE_DS (INSN_EXPR (insn)) = 0;
1853   INSN_SPEC_CHECKED_DS (insn) = check_ds;
1854 
1855   /* Decrease priority of check by difference of load/check instruction
1856      latencies.  */
1857   EXPR_PRIORITY (INSN_EXPR (insn)) -= (sel_vinsn_cost (INSN_VINSN (orig_insn))
1858 				       - sel_vinsn_cost (INSN_VINSN (insn)));
1859 
1860   /* Emit copy of original insn (though with replaced target register,
1861      if needed) to the recovery block.  */
1862   if (recovery_block != NULL)
1863     {
1864       rtx twin_rtx;
1865 
1866       twin_rtx = copy_rtx (PATTERN (EXPR_INSN_RTX (c_expr)));
1867       twin_rtx = create_insn_rtx_from_pattern (twin_rtx, NULL_RTX);
1868       sel_gen_recovery_insn_from_rtx_after (twin_rtx,
1869 					    INSN_EXPR (orig_insn),
1870 					    INSN_SEQNO (insn),
1871 					    bb_note (recovery_block));
1872     }
1873 
1874   /* If we've generated a data speculation check, make sure
1875      that all the bookkeeping instructions we'll create during
1876      this move_op () will allocate an ALAT entry so that the
1877      check won't fail.
1878      In case of control speculation we must convert C_EXPR to control
1879      speculative mode, because failing to do so will bring us an exception
1880      thrown by the non-control-speculative load.  */
1881   check_ds = ds_get_max_dep_weak (check_ds);
1882   speculate_expr (c_expr, check_ds);
1883 
1884   return insn;
1885 }
1886 
1887 /* True when INSN is a "regN = regN" copy.  */
1888 static bool
1889 identical_copy_p (rtx insn)
1890 {
1891   rtx lhs, rhs, pat;
1892 
1893   pat = PATTERN (insn);
1894 
1895   if (GET_CODE (pat) != SET)
1896     return false;
1897 
1898   lhs = SET_DEST (pat);
1899   if (!REG_P (lhs))
1900     return false;
1901 
1902   rhs = SET_SRC (pat);
1903   if (!REG_P (rhs))
1904     return false;
1905 
1906   return REGNO (lhs) == REGNO (rhs);
1907 }
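
/* Editor's example: identical_copy_p returns true for a pattern such as
   (set (reg:SI 42) (reg:SI 42)); as noted below, such no-op copies can
   appear after substitution and must not be treated as real sets.  */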
1908 
1909 /* Undo all transformations on *AV_PTR that were done when
1910    moving through INSN.  */
1911 static void
1912 undo_transformations (av_set_t *av_ptr, rtx insn)
1913 {
1914   av_set_iterator av_iter;
1915   expr_t expr;
1916   av_set_t new_set = NULL;
1917 
1918   /* First, kill any EXPR that uses registers set by the insn.  This is
1919      required for correctness.  */
1920   FOR_EACH_EXPR_1 (expr, av_iter, av_ptr)
1921     if (!sched_insns_conditions_mutex_p (insn, EXPR_INSN_RTX (expr))
1922         && bitmap_intersect_p (INSN_REG_SETS (insn),
1923                                VINSN_REG_USES (EXPR_VINSN (expr)))
1924         /* When an insn looks like 'r1 = r1', we could substitute through
1925            it, but the above condition will still hold.  This happened with
1926            gcc.c-torture/execute/961125-1.c.  */
1927         && !identical_copy_p (insn))
1928       {
1929         if (sched_verbose >= 6)
1930           sel_print ("Expr %d removed due to use/set conflict\n",
1931                      INSN_UID (EXPR_INSN_RTX (expr)));
1932         av_set_iter_remove (&av_iter);
1933       }
1934 
1935   /* Undo transformations looking at the history vector.  */
1936   FOR_EACH_EXPR (expr, av_iter, *av_ptr)
1937     {
1938       int index = find_in_history_vect (EXPR_HISTORY_OF_CHANGES (expr),
1939                                         insn, EXPR_VINSN (expr), true);
1940 
1941       if (index >= 0)
1942         {
1943           expr_history_def *phist;
1944 
1945           phist = VEC_index (expr_history_def,
1946                              EXPR_HISTORY_OF_CHANGES (expr),
1947                              index);
1948 
1949           switch (phist->type)
1950             {
1951             case TRANS_SPECULATION:
1952               {
1953                 ds_t old_ds, new_ds;
1954 
1955                 /* Compute the difference between old and new speculative
1956                    statuses: that's what we need to check.
1957                    Earlier we used to assert that the status will really
1958                    change.  This no longer works because only the probability
1959                    bits in the status may have changed during compute_av_set,
1960                    and in the case of merging different probabilities of the
1961                    same speculative status along different paths we do not
1962                    record this in the history vector.  */
1963                 old_ds = phist->spec_ds;
1964                 new_ds = EXPR_SPEC_DONE_DS (expr);
1965 
1966                 old_ds &= SPECULATIVE;
1967                 new_ds &= SPECULATIVE;
1968                 new_ds &= ~old_ds;
1969 
1970                 EXPR_SPEC_TO_CHECK_DS (expr) |= new_ds;
1971                 break;
1972               }
1973             case TRANS_SUBSTITUTION:
1974               {
1975                 expr_def _tmp_expr, *tmp_expr = &_tmp_expr;
1976                 vinsn_t new_vi;
1977                 bool add = true;
1978 
1979                 new_vi = phist->old_expr_vinsn;
1980 
1981                 gcc_assert (VINSN_SEPARABLE_P (new_vi)
1982                             == EXPR_SEPARABLE_P (expr));
1983                 copy_expr (tmp_expr, expr);
1984 
1985                 if (vinsn_equal_p (phist->new_expr_vinsn,
1986                                    EXPR_VINSN (tmp_expr)))
1987                   change_vinsn_in_expr (tmp_expr, new_vi);
1988                 else
1989                   /* This happens when we're unsubstituting on a bookkeeping
1990                      copy, which was in turn substituted.  The history is wrong
1991                      in this case.  Do it the hard way.  */
1992                   add = substitute_reg_in_expr (tmp_expr, insn, true);
1993                 if (add)
1994                   av_set_add (&new_set, tmp_expr);
1995                 clear_expr (tmp_expr);
1996                 break;
1997               }
1998             default:
1999               gcc_unreachable ();
2000             }
2001         }
2002 
2003     }
2004 
2005   av_set_union_and_clear (av_ptr, &new_set, NULL);
2006 }
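
/* Editor's note: undo_transformations works in two passes -- first it drops
   expressions whose register uses conflict with the sets of INSN (a
   correctness requirement), then it walks each expression's history vector
   and re-applies the inverse of any speculation or substitution recorded
   for INSN, collecting the restored forms in a separate set that is finally
   unioned back into *AV_PTR.  */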
2007 
2008 
2009 /* Moveup_* helpers for code motion and computing av sets.  */
2010 
2011 /* Propagates EXPR inside an insn group through THROUGH_INSN.
2012    The difference from the function below is that only substitution is
2013    performed.  */
2014 static enum MOVEUP_EXPR_CODE
2015 moveup_expr_inside_insn_group (expr_t expr, insn_t through_insn)
2016 {
2017   vinsn_t vi = EXPR_VINSN (expr);
2018   ds_t *has_dep_p;
2019   ds_t full_ds;
2020 
2021   /* Do this only inside insn group.  */
2022   gcc_assert (INSN_SCHED_CYCLE (through_insn) > 0);
2023 
2024   full_ds = has_dependence_p (expr, through_insn, &has_dep_p);
2025   if (full_ds == 0)
2026     return MOVEUP_EXPR_SAME;
2027 
2028   /* Substitution is the possible choice in this case.  */
2029   if (has_dep_p[DEPS_IN_RHS])
2030     {
2031       /* Can't substitute UNIQUE VINSNs.  */
2032       gcc_assert (!VINSN_UNIQUE_P (vi));
2033 
2034       if (can_substitute_through_p (through_insn,
2035                                     has_dep_p[DEPS_IN_RHS])
2036           && substitute_reg_in_expr (expr, through_insn, false))
2037         {
2038           EXPR_WAS_SUBSTITUTED (expr) = true;
2039           return MOVEUP_EXPR_CHANGED;
2040         }
2041 
2042       /* Don't care about this, as even true dependencies may be allowed
2043          in an insn group.  */
2044       return MOVEUP_EXPR_SAME;
2045     }
2046 
2047   /* This can catch output dependencies in COND_EXECs.  */
2048   if (has_dep_p[DEPS_IN_INSN])
2049     return MOVEUP_EXPR_NULL;
2050 
2051   /* This is either an output or an anti dependence, which usually has
2052      a zero latency.  Allow this here; if we're wrong, tick_check_p
2053      will fix it.  */
2054   gcc_assert (has_dep_p[DEPS_IN_LHS]);
2055   return MOVEUP_EXPR_AS_RHS;
2056 }
2057 
2058 /* True when a trapping EXPR cannot be moved through THROUGH_INSN.  */
2059 #define CANT_MOVE_TRAPPING(expr, through_insn)                \
2060   (VINSN_MAY_TRAP_P (EXPR_VINSN (expr))                       \
2061    && !sel_insn_has_single_succ_p ((through_insn), SUCCS_ALL) \
2062    && !sel_insn_is_speculation_check (through_insn))
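
/* Editor's note: the macro above keeps a possibly-trapping expression
   (e.g. a memory load that may fault) from being moved through a jump with
   more than one successor, since hoisting could make the trap happen on a
   path where the insn would never have executed; moving through a
   speculation check is still allowed.  */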
2063 
2064 /* True when a conflict on a target register was found during moveup_expr.  */
2065 static bool was_target_conflict = false;
2066 
2067 /* Return true when moving a debug INSN across THROUGH_INSN will
2068    create a bookkeeping block.  We don't want to create such blocks,
2069    for they would cause codegen differences between compilations with
2070    and without debug info.  */
2071 
2072 static bool
2073 moving_insn_creates_bookkeeping_block_p (insn_t insn,
2074 					 insn_t through_insn)
2075 {
2076   basic_block bbi, bbt;
2077   edge e1, e2;
2078   edge_iterator ei1, ei2;
2079 
2080   if (!bookkeeping_can_be_created_if_moved_through_p (through_insn))
2081     {
2082       if (sched_verbose >= 9)
2083 	sel_print ("no bookkeeping required: ");
2084       return FALSE;
2085     }
2086 
2087   bbi = BLOCK_FOR_INSN (insn);
2088 
2089   if (EDGE_COUNT (bbi->preds) == 1)
2090     {
2091       if (sched_verbose >= 9)
2092 	sel_print ("only one pred edge: ");
2093       return TRUE;
2094     }
2095 
2096   bbt = BLOCK_FOR_INSN (through_insn);
2097 
2098   FOR_EACH_EDGE (e1, ei1, bbt->succs)
2099     {
2100       FOR_EACH_EDGE (e2, ei2, bbi->preds)
2101 	{
2102 	  if (find_block_for_bookkeeping (e1, e2, TRUE))
2103 	    {
2104 	      if (sched_verbose >= 9)
2105 		sel_print ("found existing block: ");
2106 	      return FALSE;
2107 	    }
2108 	}
2109     }
2110 
2111   if (sched_verbose >= 9)
2112     sel_print ("would create bookkeeping block: ");
2113 
2114   return TRUE;
2115 }
2116 
2117 /* Return true when a conflict between EXPR and THROUGH_INSN is found
2118    because of implicit clobbers newly created by renaming.  */
2119 static bool
2120 implicit_clobber_conflict_p (insn_t through_insn, expr_t expr)
2121 {
2122   HARD_REG_SET temp;
2123   rtx insn, reg, rhs, pat;
2124   hard_reg_set_iterator hrsi;
2125   unsigned regno;
2126   bool valid;
2127 
2128   /* Make a new pseudo register.  */
2129   reg = gen_reg_rtx (GET_MODE (EXPR_LHS (expr)));
2130   max_regno = max_reg_num ();
2131   maybe_extend_reg_info_p ();
2132 
2133   /* Validate a change and bail out early.  */
2134   insn = EXPR_INSN_RTX (expr);
2135   validate_change (insn, &SET_DEST (PATTERN (insn)), reg, true);
2136   valid = verify_changes (0);
2137   cancel_changes (0);
2138   if (!valid)
2139     {
2140       if (sched_verbose >= 6)
2141 	sel_print ("implicit clobbers failed validation, ");
2142       return true;
2143     }
2144 
2145   /* Make a new insn with it.  */
2146   rhs = copy_rtx (VINSN_RHS (EXPR_VINSN (expr)));
2147   pat = gen_rtx_SET (VOIDmode, reg, rhs);
2148   start_sequence ();
2149   insn = emit_insn (pat);
2150   end_sequence ();
2151 
2152   /* Calculate implicit clobbers.  */
2153   extract_insn (insn);
2154   preprocess_constraints ();
2155   ira_implicitly_set_insn_hard_regs (&temp);
2156   AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2157 
2158   /* If any implicit clobber registers intersect with regular ones in
2159      through_insn, we have a dependency and thus bail out.  */
2160   EXECUTE_IF_SET_IN_HARD_REG_SET (temp, 0, regno, hrsi)
2161     {
2162       vinsn_t vi = INSN_VINSN (through_insn);
2163       if (bitmap_bit_p (VINSN_REG_SETS (vi), regno)
2164 	  || bitmap_bit_p (VINSN_REG_CLOBBERS (vi), regno)
2165 	  || bitmap_bit_p (VINSN_REG_USES (vi), regno))
2166 	return true;
2167     }
2168 
2169   return false;
2170 }
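
/* Editor's sketch (a hypothetical helper, shown only to document the idiom
   used above): the validate/verify/cancel sequence queues a change without
   applying it for real, asks recog whether the result would still be
   recognizable, and then always rolls the change back:  */

static bool
change_would_be_valid_p (rtx insn, rtx *loc, rtx new_rtx)
{
  bool ok;

  validate_change (insn, loc, new_rtx, true); /* Queue only (in_group).  */
  ok = verify_changes (0);                    /* Would the insn still match?  */
  cancel_changes (0);                         /* Roll back unconditionally.  */
  return ok;
}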
2171 
2172 /* Modifies EXPR so it can be moved through the THROUGH_INSN,
2173    performing necessary transformations.  Record the type of transformation
2174    made in PTRANS_TYPE, when it is not NULL.  When INSIDE_INSN_GROUP,
2175    permit all dependencies except true ones, and try to remove those
2176    too via forward substitution.  All cases when a non-eliminable
2177    non-zero cost dependency exists inside an insn group will be fixed
2178    in tick_check_p instead.  */
2179 static enum MOVEUP_EXPR_CODE
2180 moveup_expr (expr_t expr, insn_t through_insn, bool inside_insn_group,
2181             enum local_trans_type *ptrans_type)
2182 {
2183   vinsn_t vi = EXPR_VINSN (expr);
2184   insn_t insn = VINSN_INSN_RTX (vi);
2185   bool was_changed = false;
2186   bool as_rhs = false;
2187   ds_t *has_dep_p;
2188   ds_t full_ds;
2189 
2190   /* ??? We use dependencies of non-debug insns on debug insns to
2191      indicate that the debug insns need to be reset if the non-debug
2192      insn is pulled ahead of them.  It's hard to figure out how to
2193      introduce such a notion in sel-sched, but it already fails to
2194      support debug insns in other ways, so we just go ahead and
2195      let the debug insns go corrupt for now.  */
2196   if (DEBUG_INSN_P (through_insn) && !DEBUG_INSN_P (insn))
2197     return MOVEUP_EXPR_SAME;
2198 
2199   /* When inside_insn_group, delegate to the helper.  */
2200   if (inside_insn_group)
2201     return moveup_expr_inside_insn_group (expr, through_insn);
2202 
2203   /* Deal with unique insns and control dependencies.  */
2204   if (VINSN_UNIQUE_P (vi))
2205     {
2206       /* We can move jumps without side-effects or jumps that are
2207 	 mutually exclusive with instruction THROUGH_INSN (in all cases where
2208 	 dependencies allow us to do so and the jump is not speculative).  */
2209       if (control_flow_insn_p (insn))
2210         {
2211           basic_block fallthru_bb;
2212 
2213           /* Do not move checks and do not move jumps through other
2214              jumps.  */
2215           if (control_flow_insn_p (through_insn)
2216               || sel_insn_is_speculation_check (insn))
2217             return MOVEUP_EXPR_NULL;
2218 
2219           /* Don't move jumps through CFG joins.  */
2220           if (bookkeeping_can_be_created_if_moved_through_p (through_insn))
2221             return MOVEUP_EXPR_NULL;
2222 
2223           /* The jump should have a clear fallthru block, and
2224              this block should be in the current region.  */
2225           if ((fallthru_bb = fallthru_bb_of_jump (insn)) == NULL
2226               || ! in_current_region_p (fallthru_bb))
2227             return MOVEUP_EXPR_NULL;
2228 
2229           /* And it should be mutually exclusive with through_insn.  */
2230           if (! sched_insns_conditions_mutex_p (insn, through_insn)
2231 	      && ! DEBUG_INSN_P (through_insn))
2232             return MOVEUP_EXPR_NULL;
2233         }
2234 
2235       /* Don't move what we can't move.  */
2236       if (EXPR_CANT_MOVE (expr)
2237 	  && BLOCK_FOR_INSN (through_insn) != BLOCK_FOR_INSN (insn))
2238 	return MOVEUP_EXPR_NULL;
2239 
2240       /* Don't move SCHED_GROUP instruction through anything.
2241          If we don't force this, then it will be possible to start
2242          scheduling a sched_group before all its dependencies are
2243          resolved.
2244          ??? Haifa deals with this issue by delaying the SCHED_GROUP
2245          as late as possible through rank_for_schedule.  */
2246       if (SCHED_GROUP_P (insn))
2247 	return MOVEUP_EXPR_NULL;
2248     }
2249   else
2250     gcc_assert (!control_flow_insn_p (insn));
2251 
2252   /* Don't move debug insns if this would require bookkeeping.  */
2253   if (DEBUG_INSN_P (insn)
2254       && BLOCK_FOR_INSN (through_insn) != BLOCK_FOR_INSN (insn)
2255       && moving_insn_creates_bookkeeping_block_p (insn, through_insn))
2256     return MOVEUP_EXPR_NULL;
2257 
2258   /* Deal with data dependencies.  */
2259   was_target_conflict = false;
2260   full_ds = has_dependence_p (expr, through_insn, &has_dep_p);
2261   if (full_ds == 0)
2262     {
2263       if (!CANT_MOVE_TRAPPING (expr, through_insn))
2264 	return MOVEUP_EXPR_SAME;
2265     }
2266   else
2267     {
2268       /* We can move UNIQUE insn up only as a whole and unchanged,
2269          so it shouldn't have any dependencies.  */
2270       if (VINSN_UNIQUE_P (vi))
2271 	return MOVEUP_EXPR_NULL;
2272     }
2273 
2274   if (full_ds != 0 && can_speculate_dep_p (full_ds))
2275     {
2276       int res;
2277 
2278       res = speculate_expr (expr, full_ds);
2279       if (res >= 0)
2280 	{
2281           /* Speculation was successful.  */
2282           full_ds = 0;
2283           was_changed = (res > 0);
2284           if (res == 2)
2285             was_target_conflict = true;
2286           if (ptrans_type)
2287             *ptrans_type = TRANS_SPECULATION;
2288 	  sel_clear_has_dependence ();
2289 	}
2290     }
2291 
2292   if (has_dep_p[DEPS_IN_INSN])
2293     /* We have some dependency that cannot be discarded.  */
2294     return MOVEUP_EXPR_NULL;
2295 
2296   if (has_dep_p[DEPS_IN_LHS])
2297     {
2298       /* Only separable insns can be moved up with the new register.
2299          In any case, we should mark the original register as
2300          unavailable.  */
2301       if (!enable_schedule_as_rhs_p || !EXPR_SEPARABLE_P (expr))
2302         return MOVEUP_EXPR_NULL;
2303 
2304       /* When renaming a hard register to a pseudo before reload, extra
2305 	 dependencies can occur from the implicit clobbers of the insn.
2306 	 Filter out such cases here.  */
2307       if (!reload_completed && REG_P (EXPR_LHS (expr))
2308 	  && HARD_REGISTER_P (EXPR_LHS (expr))
2309 	  && implicit_clobber_conflict_p (through_insn, expr))
2310 	{
2311 	  if (sched_verbose >= 6)
2312 	    sel_print ("implicit clobbers conflict detected, ");
2313 	  return MOVEUP_EXPR_NULL;
2314 	}
2315       EXPR_TARGET_AVAILABLE (expr) = false;
2316       was_target_conflict = true;
2317       as_rhs = true;
2318     }
2319 
2320   /* At this point we have either separable insns, that will be lifted
2321      up only as RHSes, or non-separable insns with no dependency in lhs.
2322      If dependency is in RHS, then try to perform substitution and move up
2323      substituted RHS:
2324 
2325       Ex. 1:				  Ex.2
2326 	y = x;				    y = x;
2327 	z = y*2;			    y = y*2;
2328 
2329     In Ex.1 y*2 can be substituted for x*2 and the whole operation can be
2330     moved above y=x assignment as z=x*2.
2331 
2332     In Ex.2 y*2 also can be substituted for x*2, but only the right hand
2333     side can be moved because of the output dependency.  The operation was
2334     cropped to its rhs above.  */
2335   if (has_dep_p[DEPS_IN_RHS])
2336     {
2337       ds_t *rhs_dsp = &has_dep_p[DEPS_IN_RHS];
2338 
2339       /* Can't substitute UNIQUE VINSNs.  */
2340       gcc_assert (!VINSN_UNIQUE_P (vi));
2341 
2342       if (can_speculate_dep_p (*rhs_dsp))
2343 	{
2344           int res;
2345 
2346           res = speculate_expr (expr, *rhs_dsp);
2347           if (res >= 0)
2348             {
2349               /* Speculation was successful.  */
2350               *rhs_dsp = 0;
2351               was_changed = (res > 0);
2352               if (res == 2)
2353                 was_target_conflict = true;
2354               if (ptrans_type)
2355                 *ptrans_type = TRANS_SPECULATION;
2356             }
2357 	  else
2358 	    return MOVEUP_EXPR_NULL;
2359 	}
2360       else if (can_substitute_through_p (through_insn,
2361                                          *rhs_dsp)
2362                && substitute_reg_in_expr (expr, through_insn, false))
2363 	{
2364           /* ??? We cannot perform substitution AND speculation on the same
2365              insn.  */
2366           gcc_assert (!was_changed);
2367           was_changed = true;
2368           if (ptrans_type)
2369             *ptrans_type = TRANS_SUBSTITUTION;
2370           EXPR_WAS_SUBSTITUTED (expr) = true;
2371 	}
2372       else
2373 	return MOVEUP_EXPR_NULL;
2374     }
2375 
2376   /* Don't move trapping insns through jumps.
2377      This check should be at the end to give a chance to control speculation
2378      to perform its duties.  */
2379   if (CANT_MOVE_TRAPPING (expr, through_insn))
2380     return MOVEUP_EXPR_NULL;
2381 
2382   return (was_changed
2383           ? MOVEUP_EXPR_CHANGED
2384           : (as_rhs
2385              ? MOVEUP_EXPR_AS_RHS
2386              : MOVEUP_EXPR_SAME));
2387 }
2388 
2389 /* Try to look at bitmap caches for EXPR and INSN pair, return true
2390    if successful.  When INSIDE_INSN_GROUP, also try to ignore dependencies
2391    that can exist within a parallel group.  Write to RES the resulting
2392    code for moveup_expr.  */
2393 static bool
2394 try_bitmap_cache (expr_t expr, insn_t insn,
2395                   bool inside_insn_group,
2396                   enum MOVEUP_EXPR_CODE *res)
2397 {
2398   int expr_uid = INSN_UID (EXPR_INSN_RTX (expr));
2399 
2400   /* First check whether we've analyzed this situation already.  */
2401   if (bitmap_bit_p (INSN_ANALYZED_DEPS (insn), expr_uid))
2402     {
2403       if (bitmap_bit_p (INSN_FOUND_DEPS (insn), expr_uid))
2404         {
2405           if (sched_verbose >= 6)
2406             sel_print ("removed (cached)\n");
2407           *res = MOVEUP_EXPR_NULL;
2408           return true;
2409         }
2410       else
2411         {
2412           if (sched_verbose >= 6)
2413             sel_print ("unchanged (cached)\n");
2414           *res = MOVEUP_EXPR_SAME;
2415           return true;
2416         }
2417     }
2418   else if (bitmap_bit_p (INSN_FOUND_DEPS (insn), expr_uid))
2419     {
2420       if (inside_insn_group)
2421         {
2422           if (sched_verbose >= 6)
2423             sel_print ("unchanged (as RHS, cached, inside insn group)\n");
2424           *res = MOVEUP_EXPR_SAME;
2425           return true;
2426 
2427         }
2428       else
2429         EXPR_TARGET_AVAILABLE (expr) = false;
2430 
2431       /* This is the only case when propagation result can change over time,
2432          as we can dynamically switch off scheduling as RHS.  In this case,
2433          just check the flag to reach the correct decision.  */
2434       if (enable_schedule_as_rhs_p)
2435         {
2436           if (sched_verbose >= 6)
2437             sel_print ("unchanged (as RHS, cached)\n");
2438           *res = MOVEUP_EXPR_AS_RHS;
2439           return true;
2440         }
2441       else
2442         {
2443           if (sched_verbose >= 6)
2444             sel_print ("removed (cached as RHS, but renaming"
2445                        " is now disabled)\n");
2446           *res = MOVEUP_EXPR_NULL;
2447           return true;
2448         }
2449     }
2450 
2451   return false;
2452 }
2453 
2454 /* Try to look at bitmap caches for EXPR and INSN pair, return true
2455    if successful.  Write to RES the resulting code for moveup_expr.  */
2456 static bool
2457 try_transformation_cache (expr_t expr, insn_t insn,
2458                           enum MOVEUP_EXPR_CODE *res)
2459 {
2460   struct transformed_insns *pti
2461     = (struct transformed_insns *)
2462     htab_find_with_hash (INSN_TRANSFORMED_INSNS (insn),
2463                          &EXPR_VINSN (expr),
2464                          VINSN_HASH_RTX (EXPR_VINSN (expr)));
2465   if (pti)
2466     {
2467       /* This EXPR was already moved through this insn and was
2468          changed as a result.  Fetch the proper data from
2469          the hashtable.  */
2470       insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
2471                               INSN_UID (insn), pti->type,
2472                               pti->vinsn_old, pti->vinsn_new,
2473                               EXPR_SPEC_DONE_DS (expr));
2474 
2475       if (INSN_IN_STREAM_P (VINSN_INSN_RTX (pti->vinsn_new)))
2476         pti->vinsn_new = vinsn_copy (pti->vinsn_new, true);
2477       change_vinsn_in_expr (expr, pti->vinsn_new);
2478       if (pti->was_target_conflict)
2479         EXPR_TARGET_AVAILABLE (expr) = false;
2480       if (pti->type == TRANS_SPECULATION)
2481         {
2482           EXPR_SPEC_DONE_DS (expr) = pti->ds;
2483           EXPR_NEEDS_SPEC_CHECK_P (expr) |= pti->needs_check;
2484         }
2485 
2486       if (sched_verbose >= 6)
2487         {
2488           sel_print ("changed (cached): ");
2489           dump_expr (expr);
2490           sel_print ("\n");
2491         }
2492 
2493       *res = MOVEUP_EXPR_CHANGED;
2494       return true;
2495     }
2496 
2497   return false;
2498 }
2499 
2500 /* Update bitmap caches on INSN with result RES of propagating EXPR.  */
2501 static void
2502 update_bitmap_cache (expr_t expr, insn_t insn, bool inside_insn_group,
2503                      enum MOVEUP_EXPR_CODE res)
2504 {
2505   int expr_uid = INSN_UID (EXPR_INSN_RTX (expr));
2506 
2507   /* Do not cache result of propagating jumps through an insn group,
2508      as it is always true, which is not useful outside the group.  */
2509   if (inside_insn_group)
2510     return;
2511 
2512   if (res == MOVEUP_EXPR_NULL)
2513     {
2514       bitmap_set_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
2515       bitmap_set_bit (INSN_FOUND_DEPS (insn), expr_uid);
2516     }
2517   else if (res == MOVEUP_EXPR_SAME)
2518     {
2519       bitmap_set_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
2520       bitmap_clear_bit (INSN_FOUND_DEPS (insn), expr_uid);
2521     }
2522   else if (res == MOVEUP_EXPR_AS_RHS)
2523     {
2524       bitmap_clear_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
2525       bitmap_set_bit (INSN_FOUND_DEPS (insn), expr_uid);
2526     }
2527   else
2528     gcc_unreachable ();
2529 }
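
/* Editor's summary of the bitmap cache encoding shared by try_bitmap_cache
   and update_bitmap_cache (a documentation aid only):

     INSN_ANALYZED_DEPS  INSN_FOUND_DEPS  cached result
            1                  1          MOVEUP_EXPR_NULL
            1                  0          MOVEUP_EXPR_SAME
            0                  1          MOVEUP_EXPR_AS_RHS (degrades to
                                          NULL when renaming is disabled)
            0                  0          nothing cached

   MOVEUP_EXPR_CHANGED results go to the transformation hashtable instead.  */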
2530 
2531 /* Update hashtable on INSN with changed EXPR, old EXPR_OLD_VINSN
2532    and transformation type TRANS_TYPE.  */
2533 static void
2534 update_transformation_cache (expr_t expr, insn_t insn,
2535                              bool inside_insn_group,
2536                              enum local_trans_type trans_type,
2537                              vinsn_t expr_old_vinsn)
2538 {
2539   struct transformed_insns *pti;
2540 
2541   if (inside_insn_group)
2542     return;
2543 
2544   pti = XNEW (struct transformed_insns);
2545   pti->vinsn_old = expr_old_vinsn;
2546   pti->vinsn_new = EXPR_VINSN (expr);
2547   pti->type = trans_type;
2548   pti->was_target_conflict = was_target_conflict;
2549   pti->ds = EXPR_SPEC_DONE_DS (expr);
2550   pti->needs_check = EXPR_NEEDS_SPEC_CHECK_P (expr);
2551   vinsn_attach (pti->vinsn_old);
2552   vinsn_attach (pti->vinsn_new);
2553   *((struct transformed_insns **)
2554     htab_find_slot_with_hash (INSN_TRANSFORMED_INSNS (insn),
2555                               pti, VINSN_HASH_RTX (expr_old_vinsn),
2556                               INSERT)) = pti;
2557 }
2558 
2559 /* Same as moveup_expr, but first looks up the result of
2560    transformation in caches.  */
2561 static enum MOVEUP_EXPR_CODE
2562 moveup_expr_cached (expr_t expr, insn_t insn, bool inside_insn_group)
2563 {
2564   enum MOVEUP_EXPR_CODE res;
2565   bool got_answer = false;
2566 
2567   if (sched_verbose >= 6)
2568     {
2569       sel_print ("Moving ");
2570       dump_expr (expr);
2571       sel_print (" through %d: ", INSN_UID (insn));
2572     }
2573 
2574   if (DEBUG_INSN_P (EXPR_INSN_RTX (expr))
2575       && (sel_bb_head (BLOCK_FOR_INSN (EXPR_INSN_RTX (expr)))
2576 	  == EXPR_INSN_RTX (expr)))
2577     /* Don't use cached information for debug insns that are heads of
2578        basic blocks.  */;
2579   else if (try_bitmap_cache (expr, insn, inside_insn_group, &res))
2580     /* When inside an insn group, we do not want to remove stores conflicting
2581        with previously issued loads.  */
2582     got_answer = ! inside_insn_group || res != MOVEUP_EXPR_NULL;
2583   else if (try_transformation_cache (expr, insn, &res))
2584     got_answer = true;
2585 
2586   if (! got_answer)
2587     {
2588       /* Invoke moveup_expr and record the results.  */
2589       vinsn_t expr_old_vinsn = EXPR_VINSN (expr);
2590       ds_t expr_old_spec_ds = EXPR_SPEC_DONE_DS (expr);
2591       int expr_uid = INSN_UID (VINSN_INSN_RTX (expr_old_vinsn));
2592       bool unique_p = VINSN_UNIQUE_P (expr_old_vinsn);
2593       enum local_trans_type trans_type = TRANS_SUBSTITUTION;
2594 
2595       /* ??? Invent something better than this.  We can't allow old_vinsn
2596          to go, we need it for the history vector.  */
2597       vinsn_attach (expr_old_vinsn);
2598 
2599       res = moveup_expr (expr, insn, inside_insn_group,
2600                          &trans_type);
2601       switch (res)
2602         {
2603         case MOVEUP_EXPR_NULL:
2604           update_bitmap_cache (expr, insn, inside_insn_group, res);
2605 	  if (sched_verbose >= 6)
2606             sel_print ("removed\n");
2607 	  break;
2608 
2609 	case MOVEUP_EXPR_SAME:
2610           update_bitmap_cache (expr, insn, inside_insn_group, res);
2611           if (sched_verbose >= 6)
2612             sel_print ("unchanged\n");
2613 	  break;
2614 
2615         case MOVEUP_EXPR_AS_RHS:
2616           gcc_assert (!unique_p || inside_insn_group);
2617           update_bitmap_cache (expr, insn, inside_insn_group, res);
2618 	  if (sched_verbose >= 6)
2619             sel_print ("unchanged (as RHS)\n");
2620 	  break;
2621 
2622 	case MOVEUP_EXPR_CHANGED:
2623           gcc_assert (INSN_UID (EXPR_INSN_RTX (expr)) != expr_uid
2624                       || EXPR_SPEC_DONE_DS (expr) != expr_old_spec_ds);
2625           insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
2626                                   INSN_UID (insn), trans_type,
2627                                   expr_old_vinsn, EXPR_VINSN (expr),
2628                                   expr_old_spec_ds);
2629           update_transformation_cache (expr, insn, inside_insn_group,
2630                                        trans_type, expr_old_vinsn);
2631           if (sched_verbose >= 6)
2632             {
2633               sel_print ("changed: ");
2634               dump_expr (expr);
2635               sel_print ("\n");
2636             }
2637 	  break;
2638 	default:
2639 	  gcc_unreachable ();
2640         }
2641 
2642       vinsn_detach (expr_old_vinsn);
2643     }
2644 
2645   return res;
2646 }
2647 
2648 /* Moves an av set AVP up through INSN, performing necessary
2649    transformations.  */
2650 static void
2651 moveup_set_expr (av_set_t *avp, insn_t insn, bool inside_insn_group)
2652 {
2653   av_set_iterator i;
2654   expr_t expr;
2655 
2656   FOR_EACH_EXPR_1 (expr, i, avp)
2657     {
2658 
2659       switch (moveup_expr_cached (expr, insn, inside_insn_group))
2660 	{
2661 	case MOVEUP_EXPR_SAME:
2662         case MOVEUP_EXPR_AS_RHS:
2663 	  break;
2664 
2665 	case MOVEUP_EXPR_NULL:
2666 	  av_set_iter_remove (&i);
2667 	  break;
2668 
2669 	case MOVEUP_EXPR_CHANGED:
2670           expr = merge_with_other_exprs (avp, &i, expr);
2671 	  break;
2672 
2673 	default:
2674 	  gcc_unreachable ();
2675 	}
2676     }
2677 }
2678 
2679 /* Moves AVP set along PATH.  */
2680 static void
2681 moveup_set_inside_insn_group (av_set_t *avp, ilist_t path)
2682 {
2683   int last_cycle;
2684 
2685   if (sched_verbose >= 6)
2686     sel_print ("Moving expressions up in the insn group...\n");
2687   if (! path)
2688     return;
2689   last_cycle = INSN_SCHED_CYCLE (ILIST_INSN (path));
2690   while (path
2691          && INSN_SCHED_CYCLE (ILIST_INSN (path)) == last_cycle)
2692     {
2693       moveup_set_expr (avp, ILIST_INSN (path), true);
2694       path = ILIST_NEXT (path);
2695     }
2696 }
2697 
2698 /* Returns true if after moving EXPR along PATH it equals EXPR_VLIW.  */
2699 static bool
2700 equal_after_moveup_path_p (expr_t expr, ilist_t path, expr_t expr_vliw)
2701 {
2702   expr_def _tmp, *tmp = &_tmp;
2703   int last_cycle;
2704   bool res = true;
2705 
2706   copy_expr_onside (tmp, expr);
2707   last_cycle = path ? INSN_SCHED_CYCLE (ILIST_INSN (path)) : 0;
2708   while (path
2709          && res
2710          && INSN_SCHED_CYCLE (ILIST_INSN (path)) == last_cycle)
2711     {
2712       res = (moveup_expr_cached (tmp, ILIST_INSN (path), true)
2713              != MOVEUP_EXPR_NULL);
2714       path = ILIST_NEXT (path);
2715     }
2716 
2717   if (res)
2718     {
2719       vinsn_t tmp_vinsn = EXPR_VINSN (tmp);
2720       vinsn_t expr_vliw_vinsn = EXPR_VINSN (expr_vliw);
2721 
2722       if (tmp_vinsn != expr_vliw_vinsn)
2723 	res = vinsn_equal_p (tmp_vinsn, expr_vliw_vinsn);
2724     }
2725 
2726   clear_expr (tmp);
2727   return res;
2728 }
2729 
2730 
2731 /* Functions that compute av and lv sets.  */
2732 
2733 /* Returns true if INSN is not a downward continuation of the given path P in
2734    the current stage.  */
2735 static bool
2736 is_ineligible_successor (insn_t insn, ilist_t p)
2737 {
2738   insn_t prev_insn;
2739 
2740   /* Check that insn has not been deleted.  */
2741   if (PREV_INSN (insn) && NEXT_INSN (PREV_INSN (insn)) != insn)
2742     gcc_unreachable ();
2743   else if (NEXT_INSN (insn) && PREV_INSN (NEXT_INSN (insn)) != insn)
2744     gcc_unreachable ();
2745 
2746   /* If it's the first insn visited, then the successor is ok.  */
2747   if (!p)
2748     return false;
2749 
2750   prev_insn = ILIST_INSN (p);
2751 
2752   if (/* a backward edge.  */
2753       INSN_SEQNO (insn) < INSN_SEQNO (prev_insn)
2754       /* is already visited.  */
2755       || (INSN_SEQNO (insn) == INSN_SEQNO (prev_insn)
2756 	  && (ilist_is_in_p (p, insn)
2757               /* We can reach another fence here and still seqno of insn
2758                  would be equal to seqno of prev_insn.  This is possible
2759                  when prev_insn is a previously created bookkeeping copy.
2760                  In that case it'd get a seqno of insn.  Thus, check here
2761                  whether insn is in current fence too.  */
2762               || IN_CURRENT_FENCE_P (insn)))
2763       /* Was already scheduled on this round.  */
2764       || (INSN_SEQNO (insn) > INSN_SEQNO (prev_insn)
2765 	  && IN_CURRENT_FENCE_P (insn))
2766       /* An insn from another fence could also be
2767 	 scheduled earlier even if this insn is not in
2768 	 a fence list right now.  Check INSN_SCHED_CYCLE instead.  */
2769       || (!pipelining_p
2770           && INSN_SCHED_TIMES (insn) > 0))
2771     return true;
2772   else
2773     return false;
2774 }
2775 
2776 /* Computes the av_set below the last bb insn INSN, doing all the 'dirty work'
2777    of handling multiple successors and properly merging their av_sets.  P is
2778    the current path traversed.  WS is the size of the lookahead window.
2779    Return the av set computed.  */
2780 static av_set_t
2781 compute_av_set_at_bb_end (insn_t insn, ilist_t p, int ws)
2782 {
2783   struct succs_info *sinfo;
2784   av_set_t expr_in_all_succ_branches = NULL;
2785   int is;
2786   insn_t succ, zero_succ = NULL;
2787   av_set_t av1 = NULL;
2788 
2789   gcc_assert (sel_bb_end_p (insn));
2790 
2791   /* Find the different kinds of successors needed for correct computation
2792      of SPEC and TARGET_AVAILABLE attributes.  */
2793   sinfo = compute_succs_info (insn, SUCCS_NORMAL);
2794 
2795   /* Debug output.  */
2796   if (sched_verbose >= 6)
2797     {
2798       sel_print ("successors of bb end (%d): ", INSN_UID (insn));
2799       dump_insn_vector (sinfo->succs_ok);
2800       sel_print ("\n");
2801       if (sinfo->succs_ok_n != sinfo->all_succs_n)
2802         sel_print ("real successors num: %d\n", sinfo->all_succs_n);
2803     }
2804 
2805   /* Add insn to the tail of current path.  */
2806   ilist_add (&p, insn);
2807 
2808   FOR_EACH_VEC_ELT (rtx, sinfo->succs_ok, is, succ)
2809     {
2810       av_set_t succ_set;
2811 
2812       /* We will edit SUCC_SET and EXPR_SPEC field of its elements.  */
2813       succ_set = compute_av_set_inside_bb (succ, p, ws, true);
2814 
2815       av_set_split_usefulness (succ_set,
2816                                VEC_index (int, sinfo->probs_ok, is),
2817                                sinfo->all_prob);
2818 
2819       if (sinfo->all_succs_n > 1)
2820 	{
2821           /* Find EXPR'es that came from *all* successors and save them
2822              into expr_in_all_succ_branches.  This set will be used later
2823              for calculating speculation attributes of EXPR'es.  */
2824           if (is == 0)
2825             {
2826               expr_in_all_succ_branches = av_set_copy (succ_set);
2827 
2828               /* Remember the first successor for later. */
2829               zero_succ = succ;
2830             }
2831           else
2832             {
2833               av_set_iterator i;
2834               expr_t expr;
2835 
2836               FOR_EACH_EXPR_1 (expr, i, &expr_in_all_succ_branches)
2837                 if (!av_set_is_in_p (succ_set, EXPR_VINSN (expr)))
2838                   av_set_iter_remove (&i);
2839             }
2840 	}
2841 
2842       /* Union the av_sets.  Check liveness restrictions on target registers
2843          in special case of two successors.  */
2844       if (sinfo->succs_ok_n == 2 && is == 1)
2845         {
2846           basic_block bb0 = BLOCK_FOR_INSN (zero_succ);
2847           basic_block bb1 = BLOCK_FOR_INSN (succ);
2848 
2849           gcc_assert (BB_LV_SET_VALID_P (bb0) && BB_LV_SET_VALID_P (bb1));
2850           av_set_union_and_live (&av1, &succ_set,
2851                                  BB_LV_SET (bb0),
2852                                  BB_LV_SET (bb1),
2853                                  insn);
2854         }
2855       else
2856         av_set_union_and_clear (&av1, &succ_set, insn);
2857     }
2858 
2859   /* Check liveness restrictions the hard way when there are more than
2860      two successors.  */
2861   if (sinfo->succs_ok_n > 2)
2862     FOR_EACH_VEC_ELT (rtx, sinfo->succs_ok, is, succ)
2863       {
2864         basic_block succ_bb = BLOCK_FOR_INSN (succ);
2865 
2866         gcc_assert (BB_LV_SET_VALID_P (succ_bb));
2867         mark_unavailable_targets (av1, BB_AV_SET (succ_bb),
2868                                   BB_LV_SET (succ_bb));
2869       }
2870 
2871   /* Finally, check liveness restrictions on paths leaving the region.  */
2872   if (sinfo->all_succs_n > sinfo->succs_ok_n)
2873     FOR_EACH_VEC_ELT (rtx, sinfo->succs_other, is, succ)
2874       mark_unavailable_targets
2875         (av1, NULL, BB_LV_SET (BLOCK_FOR_INSN (succ)));
2876 
2877   if (sinfo->all_succs_n > 1)
2878     {
2879       av_set_iterator i;
2880       expr_t expr;
2881 
2882       /* Increase the spec attribute of all EXPR'es that didn't come
2883 	 from all successors.  */
2884       FOR_EACH_EXPR (expr, i, av1)
2885 	if (!av_set_is_in_p (expr_in_all_succ_branches, EXPR_VINSN (expr)))
2886 	  EXPR_SPEC (expr)++;
2887 
2888       av_set_clear (&expr_in_all_succ_branches);
2889 
2890       /* Do not move conditional branches through other
2891 	 conditional branches.  So, remove all conditional
2892 	 branches from av_set if current operator is a conditional
2893 	 branch.  */
2894       av_set_substract_cond_branches (&av1);
2895     }
2896 
2897   ilist_remove (&p);
2898   free_succs_info (sinfo);
2899 
2900   if (sched_verbose >= 6)
2901     {
2902       sel_print ("av_succs (%d): ", INSN_UID (insn));
2903       dump_av_set (av1);
2904       sel_print ("\n");
2905     }
2906 
2907   return av1;
2908 }
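
/* Editor's note: in short, compute_av_set_at_bb_end (1) computes the av_set
   of every eligible successor, scaled by the probability of reaching it,
   (2) unions the sets while checking target-register liveness, (3) bumps
   EXPR_SPEC for expressions that did not come from all branches, and
   (4) removes conditional branches from the set when INSN itself is one.  */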
2909 
2910 /* This function computes av_set for the FIRST_INSN by dragging valid
2911    av_set through all basic block insns either from the end of basic block
2912    (computed using compute_av_set_at_bb_end) or from the insn on which
2913    MAX_WS was exceeded.  It uses compute_av_set_at_bb_end to compute av_set
2914    below the basic block and to handle conditional branches.
2915    FIRST_INSN - the basic block head, P - path consisting of the insns
2916    traversed on the way to the FIRST_INSN (the path is sparse, only bb heads
2917    and bb ends are added to the path), WS - current window size,
2918    NEED_COPY_P - true if we'll make a copy of av_set before returning it.  */
2919 static av_set_t
2920 compute_av_set_inside_bb (insn_t first_insn, ilist_t p, int ws,
2921 			  bool need_copy_p)
2922 {
2923   insn_t cur_insn;
2924   int end_ws = ws;
2925   insn_t bb_end = sel_bb_end (BLOCK_FOR_INSN (first_insn));
2926   insn_t after_bb_end = NEXT_INSN (bb_end);
2927   insn_t last_insn;
2928   av_set_t av = NULL;
2929   basic_block cur_bb = BLOCK_FOR_INSN (first_insn);
2930 
2931   /* Return NULL if insn is not on the legitimate downward path.  */
2932   if (is_ineligible_successor (first_insn, p))
2933     {
2934       if (sched_verbose >= 6)
2935         sel_print ("Insn %d is ineligible_successor\n", INSN_UID (first_insn));
2936 
2937       return NULL;
2938     }
2939 
2940   /* If insn already has valid av(insn) computed, just return it.  */
2941   if (AV_SET_VALID_P (first_insn))
2942     {
2943       av_set_t av_set;
2944 
2945       if (sel_bb_head_p (first_insn))
2946 	av_set = BB_AV_SET (BLOCK_FOR_INSN (first_insn));
2947       else
2948 	av_set = NULL;
2949 
2950       if (sched_verbose >= 6)
2951         {
2952           sel_print ("Insn %d has a valid av set: ", INSN_UID (first_insn));
2953           dump_av_set (av_set);
2954           sel_print ("\n");
2955         }
2956 
2957       return need_copy_p ? av_set_copy (av_set) : av_set;
2958     }
2959 
2960   ilist_add (&p, first_insn);
2961 
2962   /* After this loop has completed, LAST_INSN will be the insn that has a
2963      valid av_set to start the backward computation from: the set is either
2964      NULL, because the window size was exceeded on that insn, or a valid
2965      av_set as returned by compute_av_set for the last insn of the
2966      basic block.  */
2967   for (last_insn = first_insn; last_insn != after_bb_end;
2968        last_insn = NEXT_INSN (last_insn))
2969     {
2970       /* We may encounter valid av_set not only on bb_head, but also on
2971 	 those insns on which previously MAX_WS was exceeded.  */
2972       if (AV_SET_VALID_P (last_insn))
2973 	{
2974           if (sched_verbose >= 6)
2975             sel_print ("Insn %d has a valid empty av set\n", INSN_UID (last_insn));
2976 	  break;
2977 	}
2978 
2979       /* The special case: the last insn of the BB may be an
2980          ineligible_successor due to its seqno, which was set on
2981 	 it when it was created as a bookkeeping copy.  */
2982       if (last_insn != first_insn
2983           && is_ineligible_successor (last_insn, p))
2984 	{
2985           if (sched_verbose >= 6)
2986             sel_print ("Insn %d is ineligible_successor\n", INSN_UID (last_insn));
2987 	  break;
2988 	}
2989 
2990       if (DEBUG_INSN_P (last_insn))
2991 	continue;
2992 
2993       if (end_ws > max_ws)
2994 	{
2995 	  /* We can reach max lookahead size at bb_header, so clean av_set
2996 	     first.  */
2997 	  INSN_WS_LEVEL (last_insn) = global_level;
2998 
2999 	  if (sched_verbose >= 6)
3000             sel_print ("Insn %d is beyond the software lookahead window size\n",
3001                        INSN_UID (last_insn));
3002 	  break;
3003 	}
3004 
3005       end_ws++;
3006     }
3007 
3008   /* Get the valid av_set into AV above the LAST_INSN to start backward
3009      computation from.  It will be either an empty av_set or the av_set
3010      computed from the successors of the last insn of the current bb.  */
3011   if (last_insn != after_bb_end)
3012     {
3013       av = NULL;
3014 
3015       /* This is needed only to obtain av_sets that are identical to
3016          those computed by the old compute_av_set version.  */
3017       if (last_insn == first_insn && !INSN_NOP_P (last_insn))
3018         av_set_add (&av, INSN_EXPR (last_insn));
3019     }
3020   else
3021     /* END_WS is always already increased by 1 if LAST_INSN == AFTER_BB_END.  */
3022     av = compute_av_set_at_bb_end (bb_end, p, end_ws);
3023 
3024   /* Compute av_set in AV starting from below the LAST_INSN up to
3025      location above the FIRST_INSN.  */
3026   for (cur_insn = PREV_INSN (last_insn); cur_insn != PREV_INSN (first_insn);
3027        cur_insn = PREV_INSN (cur_insn))
3028     if (!INSN_NOP_P (cur_insn))
3029       {
3030         expr_t expr;
3031 
3032         moveup_set_expr (&av, cur_insn, false);
3033 
3034         /* If the expression for CUR_INSN is already in the set,
3035            replace it by the new one.  */
3036         expr = av_set_lookup (av, INSN_VINSN (cur_insn));
3037         if (expr != NULL)
3038           {
3039             clear_expr (expr);
3040             copy_expr (expr, INSN_EXPR (cur_insn));
3041           }
3042         else
3043           av_set_add (&av, INSN_EXPR (cur_insn));
3044       }
3045 
3046   /* Clear stale bb_av_set.  */
3047   if (sel_bb_head_p (first_insn))
3048     {
3049       av_set_clear (&BB_AV_SET (cur_bb));
3050       BB_AV_SET (cur_bb) = need_copy_p ? av_set_copy (av) : av;
3051       BB_AV_LEVEL (cur_bb) = global_level;
3052     }
3053 
3054   if (sched_verbose >= 6)
3055     {
3056       sel_print ("Computed av set for insn %d: ", INSN_UID (first_insn));
3057       dump_av_set (av);
3058       sel_print ("\n");
3059     }
3060 
3061   ilist_remove (&p);
3062   return av;
3063 }
3064 
3065 /* Compute av set before INSN.
3066    INSN - the current operation (actual rtx INSN)
3067    P - the current path, which is list of insns visited so far
3068    WS - software lookahead window size.
3069    UNIQUE_P - TRUE, if returned av_set will be changed, hence
3070    if we want to save computed av_set in s_i_d, we should make a copy of it.
3071 
3072    In the resulting set we will have only expressions that don't have delay
3073    stalls and nonsubstitutable dependences.  */
3074 static av_set_t
3075 compute_av_set (insn_t insn, ilist_t p, int ws, bool unique_p)
3076 {
3077   return compute_av_set_inside_bb (insn, p, ws, unique_p);
3078 }
3079 
3080 /* Propagate a liveness set LV through INSN.  */
3081 static void
3082 propagate_lv_set (regset lv, insn_t insn)
3083 {
3084   gcc_assert (INSN_P (insn));
3085 
3086   if (INSN_NOP_P (insn))
3087     return;
3088 
3089   df_simulate_one_insn_backwards (BLOCK_FOR_INSN (insn), insn, lv);
3090 }
3091 
3092 /* Return the liveness set at the end of BB.  */
3093 static regset
3094 compute_live_after_bb (basic_block bb)
3095 {
3096   edge e;
3097   edge_iterator ei;
3098   regset lv = get_clear_regset_from_pool ();
3099 
3100   gcc_assert (!ignore_first);
3101 
3102   FOR_EACH_EDGE (e, ei, bb->succs)
3103     if (sel_bb_empty_p (e->dest))
3104       {
3105         if (! BB_LV_SET_VALID_P (e->dest))
3106           {
3107             gcc_unreachable ();
3108             gcc_assert (BB_LV_SET (e->dest) == NULL);
3109             BB_LV_SET (e->dest) = compute_live_after_bb (e->dest);
3110             BB_LV_SET_VALID_P (e->dest) = true;
3111           }
3112         IOR_REG_SET (lv, BB_LV_SET (e->dest));
3113       }
3114     else
3115       IOR_REG_SET (lv, compute_live (sel_bb_head (e->dest)));
3116 
3117   return lv;
3118 }
3119 
3120 /* Compute the set of all live registers at the point before INSN and save
3121    it at INSN if INSN is bb header.  */
3122 regset
3123 compute_live (insn_t insn)
3124 {
3125   basic_block bb = BLOCK_FOR_INSN (insn);
3126   insn_t final, temp;
3127   regset lv;
3128 
3129   /* Return the valid set if we already have one.  */
3130   if (!ignore_first)
3131     {
3132       regset src = NULL;
3133 
3134       if (sel_bb_head_p (insn) && BB_LV_SET_VALID_P (bb))
3135         src = BB_LV_SET (bb);
3136       else
3137         {
3138           gcc_assert (in_current_region_p (bb));
3139           if (INSN_LIVE_VALID_P (insn))
3140             src = INSN_LIVE (insn);
3141         }
3142 
3143       if (src)
3144 	{
3145 	  lv = get_regset_from_pool ();
3146 	  COPY_REG_SET (lv, src);
3147 
3148           if (sel_bb_head_p (insn) && ! BB_LV_SET_VALID_P (bb))
3149             {
3150               COPY_REG_SET (BB_LV_SET (bb), lv);
3151               BB_LV_SET_VALID_P (bb) = true;
3152             }
3153 
3154 	  return_regset_to_pool (lv);
3155 	  return lv;
3156 	}
3157     }
3158 
3159   /* We've skipped the wrong lv_set.  Don't skip the right one.  */
3160   ignore_first = false;
3161   gcc_assert (in_current_region_p (bb));
3162 
3163   /* Find a valid LV set in this block or below, if needed.
3164      Start searching from the next insn: either ignore_first is true, or
3165      INSN doesn't have a correct live set.  */
3166   temp = NEXT_INSN (insn);
3167   final = NEXT_INSN (BB_END (bb));
3168   while (temp != final && ! INSN_LIVE_VALID_P (temp))
3169     temp = NEXT_INSN (temp);
3170   if (temp == final)
3171     {
3172       lv = compute_live_after_bb (bb);
3173       temp = PREV_INSN (temp);
3174     }
3175   else
3176     {
3177       lv = get_regset_from_pool ();
3178       COPY_REG_SET (lv, INSN_LIVE (temp));
3179     }
3180 
3181   /* Put correct lv sets on the insns which have bad sets.  */
3182   final = PREV_INSN (insn);
3183   while (temp != final)
3184     {
3185       propagate_lv_set (lv, temp);
3186       COPY_REG_SET (INSN_LIVE (temp), lv);
3187       INSN_LIVE_VALID_P (temp) = true;
3188       temp = PREV_INSN (temp);
3189     }
3190 
3191   /* If INSN is a bb header, also save the set for the basic block.  */
3192   if (sel_bb_head_p (insn))
3193     {
3194       basic_block bb = BLOCK_FOR_INSN (insn);
3195 
3196       COPY_REG_SET (BB_LV_SET (bb), lv);
3197       BB_LV_SET_VALID_P (bb) = true;
3198     }
3199 
3200   /* We return LV to the pool, but will not clear it there.  Thus we can
3201      legitimately use LV till the next use of regset_pool_get ().  */
3202   return_regset_to_pool (lv);
3203   return lv;
3204 }
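
/* Editorial note: a standalone, hypothetical analogue of the caching scheme
   in compute_live above -- scan forward for the nearest valid (cached)
   entry, then walk backwards, recomputing and caching every entry down to
   the queried one.  The "liveness" here is a plain int and the transfer
   function a made-up decrement; only the control flow mirrors the code
   above.  */
#if 0
#include <stdio.h>

#define N 8

int
main (void)
{
  int live[N], valid[N] = { 0 };
  int i, query = 2;

  live[N - 1] = 100;               /* Pretend the last entry is known.  */
  valid[N - 1] = 1;

  /* Find the nearest valid entry at or after QUERY.  */
  for (i = query; !valid[i]; i++)
    ;

  /* Propagate backwards, caching intermediate results.  */
  for (int j = i - 1; j >= query; j--)
    {
      live[j] = live[j + 1] - 1;   /* Hypothetical transfer function.  */
      valid[j] = 1;
    }

  printf ("live at %d = %d\n", query, live[query]);
  return 0;
}
#endif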
3205 
3206 /* Update liveness sets for INSN.  */
3207 static inline void
3208 update_liveness_on_insn (rtx insn)
3209 {
3210   ignore_first = true;
3211   compute_live (insn);
3212 }
3213 
3214 /* Compute liveness below INSN and write it into REGS.  */
3215 static inline void
3216 compute_live_below_insn (rtx insn, regset regs)
3217 {
3218   rtx succ;
3219   succ_iterator si;
3220 
3221   FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
3222     IOR_REG_SET (regs, compute_live (succ));
3223 }
3224 
3225 /* Update the data gathered in av and lv sets starting from INSN.  */
3226 static void
3227 update_data_sets (rtx insn)
3228 {
3229   update_liveness_on_insn (insn);
3230   if (sel_bb_head_p (insn))
3231     {
3232       gcc_assert (AV_LEVEL (insn) != 0);
3233       BB_AV_LEVEL (BLOCK_FOR_INSN (insn)) = -1;
3234       compute_av_set (insn, NULL, 0, 0);
3235     }
3236 }
3237 
3238 
3239 /* Helper for move_op () and find_used_regs ().
3240    Return the speculation type for which a check should be created at the place
3241    of INSN.  EXPR is one of the original ops we are searching for.  */
3242 static ds_t
3243 get_spec_check_type_for_insn (insn_t insn, expr_t expr)
3244 {
3245   ds_t to_check_ds;
3246   ds_t already_checked_ds = EXPR_SPEC_DONE_DS (INSN_EXPR (insn));
3247 
3248   to_check_ds = EXPR_SPEC_TO_CHECK_DS (expr);
3249 
3250   if (targetm.sched.get_insn_checked_ds)
3251     already_checked_ds |= targetm.sched.get_insn_checked_ds (insn);
3252 
3253   if (spec_info != NULL
3254       && (spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL))
3255     already_checked_ds |= BEGIN_CONTROL;
3256 
3257   already_checked_ds = ds_get_speculation_types (already_checked_ds);
3258 
3259   to_check_ds &= ~already_checked_ds;
3260 
3261   return to_check_ds;
3262 }
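
/* Editorial note: the mask arithmetic of get_spec_check_type_for_insn in
   miniature.  A standalone, hypothetical example (the SPEC_* values below
   are invented); it only shows how the types still to be checked are
   obtained by clearing the already-checked bits.  */
#if 0
#include <stdio.h>

#define SPEC_DATA     0x1  /* Hypothetical data-speculation bit.  */
#define SPEC_CONTROL  0x2  /* Hypothetical control-speculation bit.  */

int
main (void)
{
  unsigned to_check = SPEC_DATA | SPEC_CONTROL; /* What EXPR wants checked.  */
  unsigned already_checked = SPEC_CONTROL;      /* E.g. target or flags say so.  */

  to_check &= ~already_checked;  /* Keep only the bits not yet covered.  */
  printf ("still to check: %#x\n", to_check);   /* Prints 0x1.  */
  return 0;
}
#endif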
3263 
3264 /* Find the set of registers that are unavailable for storing exprs
3265    while moving ORIG_OPS up on the path starting from INSN due to
3266    liveness (USED_REGS) or hardware restrictions (REG_RENAME_P).
3267 
3268    All the original operations found during the traversal are saved in the
3269    ORIGINAL_INSNS list.
3270 
3271    REG_RENAME_P denotes the set of hardware registers that
3272    cannot be used for renaming due to register class restrictions,
3273    mode restrictions and the like (the register we choose should be in a
3274    class compatible with the original uses, shouldn't be in call_used_regs,
3275    should satisfy HARD_REGNO_RENAME_OK etc).
3276 
3277    Returns TRUE if we've found all original insns, FALSE otherwise.
3278 
3279    This function utilizes code_motion_path_driver (formerly find_used_regs_1)
3280    to traverse the code motion paths.  This helper function finds registers
3281    that are not available for storing exprs while moving ORIG_OPS up on the
3282    path starting from INSN.  A register is considered used on the moving path
3283    if one of the following conditions is not satisfied:
3284 
3285       (1) a register not set or read on any path from xi to an instance of
3286 	  the original operation,
3287       (2) not among the live registers of the point immediately following the
3288           first original operation on a given downward path, except for the
3289 	  original target register of the operation,
3290       (3) not live on the other path of any conditional branch that is passed
3291 	  by the operation, in case original operations are not present on
3292 	  both paths of the conditional branch.
3296 
3297    REG_RENAME_P->CROSSES_CALL is true if there is a call insn on the path
3298    from INSN to the original insn.  In this case CALL_USED_REG_SET will be added
3299    to unavailable hard regs at the point original operation is found.  */
3300 
3301 static bool
3302 find_used_regs (insn_t insn, av_set_t orig_ops, regset used_regs,
3303 		struct reg_rename  *reg_rename_p, def_list_t *original_insns)
3304 {
3305   def_list_iterator i;
3306   def_t def;
3307   int res;
3308   bool needs_spec_check_p = false;
3309   expr_t expr;
3310   av_set_iterator expr_iter;
3311   struct fur_static_params sparams;
3312   struct cmpd_local_params lparams;
3313 
3314   /* We haven't visited any blocks yet.  */
3315   bitmap_clear (code_motion_visited_blocks);
3316 
3317   /* Init parameters for code_motion_path_driver.  */
3318   sparams.crosses_call = false;
3319   sparams.original_insns = original_insns;
3320   sparams.used_regs = used_regs;
3321 
3322   /* Set the appropriate hooks and data.  */
3323   code_motion_path_driver_info = &fur_hooks;
3324 
3325   res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);
3326 
3327   reg_rename_p->crosses_call |= sparams.crosses_call;
3328 
3329   gcc_assert (res == 1);
3330   gcc_assert (original_insns && *original_insns);
3331 
3332   /* ??? We calculate whether an expression needs a check when computing
3333      av sets.  This information is not as precise as it could be due to
3334      merging this bit in merge_expr.  We can do better in find_used_regs,
3335      but we want to avoid multiple traversals of the same code motion
3336      paths.  */
3337   FOR_EACH_EXPR (expr, expr_iter, orig_ops)
3338     needs_spec_check_p |= EXPR_NEEDS_SPEC_CHECK_P (expr);
3339 
3340   /* Mark hardware regs in REG_RENAME_P that are not suitable
3341      for renaming expr in INSN due to hardware restrictions (register class,
3342      modes compatibility etc).  */
3343   FOR_EACH_DEF (def, i, *original_insns)
3344     {
3345       vinsn_t vinsn = INSN_VINSN (def->orig_insn);
3346 
3347       if (VINSN_SEPARABLE_P (vinsn))
3348 	mark_unavailable_hard_regs (def, reg_rename_p, used_regs);
3349 
3350       /* Do not allow clobbering of ld.[sa] address in case some of the
3351          original operations need a check.  */
3352       if (needs_spec_check_p)
3353 	IOR_REG_SET (used_regs, VINSN_REG_USES (vinsn));
3354     }
3355 
3356   return true;
3357 }
3358 
3359 
3360 /* Functions to choose the best insn from available ones.  */
3361 
3362 /* Adjusts the priority for EXPR using the backend *_adjust_priority hook.  */
3363 static int
3364 sel_target_adjust_priority (expr_t expr)
3365 {
3366   int priority = EXPR_PRIORITY (expr);
3367   int new_priority;
3368 
3369   if (targetm.sched.adjust_priority)
3370     new_priority = targetm.sched.adjust_priority (EXPR_INSN_RTX (expr), priority);
3371   else
3372     new_priority = priority;
3373 
3374   /* If the priority has changed, adjust EXPR_PRIORITY_ADJ accordingly.  */
3375   EXPR_PRIORITY_ADJ (expr) = new_priority - EXPR_PRIORITY (expr);
3376 
3377   gcc_assert (EXPR_PRIORITY_ADJ (expr) >= 0);
3378 
3379   if (sched_verbose >= 4)
3380     sel_print ("sel_target_adjust_priority: insn %d,  %d+%d = %d.\n",
3381 	       INSN_UID (EXPR_INSN_RTX (expr)), EXPR_PRIORITY (expr),
3382 	       EXPR_PRIORITY_ADJ (expr), new_priority);
3383 
3384   return new_priority;
3385 }
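
/* Editorial note: a standalone sketch of why a separate adjustment delta
   (as in EXPR_PRIORITY_ADJ above) is kept instead of overwriting the base
   priority: the base stays readable, and base + delta reproduces the
   hook's result.  The adjust_priority_stub below is invented.  */
#if 0
#include <assert.h>

static int
adjust_priority_stub (int priority)
{
  return priority + 2;  /* A made-up backend adjustment.  */
}

int
main (void)
{
  int base = 5;
  int adjusted = adjust_priority_stub (base);
  int adj = adjusted - base;      /* The delta, as in EXPR_PRIORITY_ADJ.  */

  assert (adj >= 0);              /* Mirrors the gcc_assert above.  */
  assert (base + adj == adjusted);
  return 0;
}
#endif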
3386 
3387 /* Rank two available exprs for schedule.  Never return 0 here.  */
3388 static int
3389 sel_rank_for_schedule (const void *x, const void *y)
3390 {
3391   expr_t tmp = *(const expr_t *) y;
3392   expr_t tmp2 = *(const expr_t *) x;
3393   insn_t tmp_insn, tmp2_insn;
3394   vinsn_t tmp_vinsn, tmp2_vinsn;
3395   int val;
3396 
3397   tmp_vinsn = EXPR_VINSN (tmp);
3398   tmp2_vinsn = EXPR_VINSN (tmp2);
3399   tmp_insn = EXPR_INSN_RTX (tmp);
3400   tmp2_insn = EXPR_INSN_RTX (tmp2);
3401 
3402   /* Schedule debug insns as early as possible.  */
3403   if (DEBUG_INSN_P (tmp_insn) && !DEBUG_INSN_P (tmp2_insn))
3404     return -1;
3405   else if (DEBUG_INSN_P (tmp2_insn))
3406     return 1;
3407 
3408   /* Prefer SCHED_GROUP_P insns to any others.  */
3409   if (SCHED_GROUP_P (tmp_insn) != SCHED_GROUP_P (tmp2_insn))
3410     {
3411       if (VINSN_UNIQUE_P (tmp_vinsn) && VINSN_UNIQUE_P (tmp2_vinsn))
3412         return SCHED_GROUP_P (tmp2_insn) ? 1 : -1;
3413 
3414       /* Now uniqueness means SCHED_GROUP_P is set, because schedule groups
3415          cannot be cloned.  */
3416       if (VINSN_UNIQUE_P (tmp2_vinsn))
3417         return 1;
3418       return -1;
3419     }
3420 
3421   /* Discourage scheduling of speculative checks.  */
3422   val = (sel_insn_is_speculation_check (tmp_insn)
3423 	 - sel_insn_is_speculation_check (tmp2_insn));
3424   if (val)
3425     return val;
3426 
3427   /* Prefer not scheduled insn over scheduled one.  */
3428   if (EXPR_SCHED_TIMES (tmp) > 0 || EXPR_SCHED_TIMES (tmp2) > 0)
3429     {
3430       val = EXPR_SCHED_TIMES (tmp) - EXPR_SCHED_TIMES (tmp2);
3431       if (val)
3432 	return val;
3433     }
3434 
3435   /* Prefer jump over non-jump instruction.  */
3436   if (control_flow_insn_p (tmp_insn) && !control_flow_insn_p (tmp2_insn))
3437     return -1;
3438   else if (control_flow_insn_p (tmp2_insn) && !control_flow_insn_p (tmp_insn))
3439     return 1;
3440 
3441   /* Prefer an expr with greater priority.  */
3442   if (EXPR_USEFULNESS (tmp) != 0 && EXPR_USEFULNESS (tmp2) != 0)
3443     {
3444       int p2 = EXPR_PRIORITY (tmp2) + EXPR_PRIORITY_ADJ (tmp2),
3445           p1 = EXPR_PRIORITY (tmp) + EXPR_PRIORITY_ADJ (tmp);
3446 
3447       val = p2 * EXPR_USEFULNESS (tmp2) - p1 * EXPR_USEFULNESS (tmp);
3448     }
3449   else
3450     val = EXPR_PRIORITY (tmp2) - EXPR_PRIORITY (tmp)
3451 	  + EXPR_PRIORITY_ADJ (tmp2) - EXPR_PRIORITY_ADJ (tmp);
3452   if (val)
3453     return val;
3454 
3455   if (spec_info != NULL && spec_info->mask != 0)
3456     /* This code was taken from haifa-sched.c: rank_for_schedule ().  */
3457     {
3458       ds_t ds1, ds2;
3459       dw_t dw1, dw2;
3460       int dw;
3461 
3462       ds1 = EXPR_SPEC_DONE_DS (tmp);
3463       if (ds1)
3464 	dw1 = ds_weak (ds1);
3465       else
3466 	dw1 = NO_DEP_WEAK;
3467 
3468       ds2 = EXPR_SPEC_DONE_DS (tmp2);
3469       if (ds2)
3470 	dw2 = ds_weak (ds2);
3471       else
3472 	dw2 = NO_DEP_WEAK;
3473 
3474       dw = dw2 - dw1;
3475       if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
3476 	return dw;
3477     }
3478 
3479   /* Prefer an old insn to a bookkeeping insn.  */
3480   if (INSN_UID (tmp_insn) < first_emitted_uid
3481       && INSN_UID (tmp2_insn) >= first_emitted_uid)
3482     return -1;
3483   if (INSN_UID (tmp_insn) >= first_emitted_uid
3484       && INSN_UID (tmp2_insn) < first_emitted_uid)
3485     return 1;
3486 
3487   /* Prefer an insn with smaller UID, as a last resort.
3488      We can't safely use INSN_LUID as it is defined only for those insns
3489      that are in the stream.  */
3490   return INSN_UID (tmp_insn) - INSN_UID (tmp2_insn);
3491 }
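
/* Editorial note: a standalone analogue of the comparator contract above.
   qsort is not stable, so a comparator that may return 0 can order equal
   elements differently between runs; breaking ties by a unique id, as the
   UID tie-break above does, keeps the sort deterministic.  The struct and
   values below are hypothetical.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct item { int priority; int uid; };

static int
rank_items (const void *x, const void *y)
{
  const struct item *a = (const struct item *) x;
  const struct item *b = (const struct item *) y;

  if (b->priority != a->priority)
    return b->priority - a->priority;  /* Higher priority first.  */
  return a->uid - b->uid;              /* Last resort: unique ids, never 0.  */
}

int
main (void)
{
  struct item v[] = { { 3, 11 }, { 5, 10 }, { 3, 9 } };

  qsort (v, 3, sizeof v[0], rank_items);
  for (int i = 0; i < 3; i++)
    printf ("prio %d uid %d\n", v[i].priority, v[i].uid);
  return 0;
}
#endif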
3492 
3493 /* Filter out expressions from av set pointed to by AV_PTR
3494    that are pipelined too many times.  */
3495 static void
3496 process_pipelined_exprs (av_set_t *av_ptr)
3497 {
3498   expr_t expr;
3499   av_set_iterator si;
3500 
3501   /* Don't pipeline already pipelined code as that would increase
3502      the number of unnecessary register moves.  */
3503   FOR_EACH_EXPR_1 (expr, si, av_ptr)
3504     {
3505       if (EXPR_SCHED_TIMES (expr)
3506 	  >= PARAM_VALUE (PARAM_SELSCHED_MAX_SCHED_TIMES))
3507 	av_set_iter_remove (&si);
3508     }
3509 }
3510 
3511 /* Filter speculative insns from AV_PTR if we don't want them.  */
3512 static void
3513 process_spec_exprs (av_set_t *av_ptr)
3514 {
3515   bool try_data_p = true;
3516   bool try_control_p = true;
3517   expr_t expr;
3518   av_set_iterator si;
3519 
3520   if (spec_info == NULL)
3521     return;
3522 
3523   /* Scan *AV_PTR to find out if we want to consider speculative
3524      instructions for scheduling.  */
3525   FOR_EACH_EXPR_1 (expr, si, av_ptr)
3526     {
3527       ds_t ds;
3528 
3529       ds = EXPR_SPEC_DONE_DS (expr);
3530 
3531      /* The probability of success is too low - don't speculate.  */
3532       if ((ds & SPECULATIVE)
3533           && (ds_weak (ds) < spec_info->data_weakness_cutoff
3534               || EXPR_USEFULNESS (expr) < spec_info->control_weakness_cutoff
3535 	      || (pipelining_p && false
3536 		  && (ds & DATA_SPEC)
3537 		  && (ds & CONTROL_SPEC))))
3538         {
3539           av_set_iter_remove (&si);
3540           continue;
3541         }
3542 
3543       if ((spec_info->flags & PREFER_NON_DATA_SPEC)
3544           && !(ds & BEGIN_DATA))
3545         try_data_p = false;
3546 
3547       if ((spec_info->flags & PREFER_NON_CONTROL_SPEC)
3548           && !(ds & BEGIN_CONTROL))
3549         try_control_p = false;
3550     }
3551 
3552   FOR_EACH_EXPR_1 (expr, si, av_ptr)
3553     {
3554       ds_t ds;
3555 
3556       ds = EXPR_SPEC_DONE_DS (expr);
3557 
3558       if (ds & SPECULATIVE)
3559         {
3560           if ((ds & BEGIN_DATA) && !try_data_p)
3561             /* We don't want any data speculative instructions right
3562                now.  */
3563             av_set_iter_remove (&si);
3564 
3565           if ((ds & BEGIN_CONTROL) && !try_control_p)
3566             /* We don't want any control speculative instructions right
3567                now.  */
3568             av_set_iter_remove (&si);
3569         }
3570     }
3571 }
3572 
3573 /* Search for any use-like insns in AV_PTR and decide on scheduling
3574    them.  Return one when found, and NULL otherwise.
3575    Note that we check here whether a USE could be scheduled to avoid
3576    an infinite loop later.  */
3577 static expr_t
3578 process_use_exprs (av_set_t *av_ptr)
3579 {
3580   expr_t expr;
3581   av_set_iterator si;
3582   bool uses_present_p = false;
3583   bool try_uses_p = true;
3584 
3585   FOR_EACH_EXPR_1 (expr, si, av_ptr)
3586     {
3587       /* This will also initialize INSN_CODE for later use.  */
3588       if (recog_memoized (EXPR_INSN_RTX (expr)) < 0)
3589         {
3590           /* If we have a USE in *AV_PTR that was not scheduled yet,
3591              schedule it, as it can only do good.  */
3592           if (EXPR_SCHED_TIMES (expr) <= 0)
3593             {
3594               if (EXPR_TARGET_AVAILABLE (expr) == 1)
3595                 return expr;
3596 
3597               av_set_iter_remove (&si);
3598             }
3599           else
3600             {
3601               gcc_assert (pipelining_p);
3602 
3603               uses_present_p = true;
3604             }
3605         }
3606       else
3607         try_uses_p = false;
3608     }
3609 
3610   if (uses_present_p)
3611     {
3612       /* If we don't want to schedule any USEs right now and we have some
3613          in *AV_PTR, remove them, else just return the first one found.  */
3614       if (!try_uses_p)
3615         {
3616           FOR_EACH_EXPR_1 (expr, si, av_ptr)
3617             if (INSN_CODE (EXPR_INSN_RTX (expr)) < 0)
3618               av_set_iter_remove (&si);
3619         }
3620       else
3621         {
3622           FOR_EACH_EXPR_1 (expr, si, av_ptr)
3623             {
3624               gcc_assert (INSN_CODE (EXPR_INSN_RTX (expr)) < 0);
3625 
3626               if (EXPR_TARGET_AVAILABLE (expr) == 1)
3627                 return expr;
3628 
3629               av_set_iter_remove (&si);
3630             }
3631         }
3632     }
3633 
3634   return NULL;
3635 }
3636 
3637 /* Lookup EXPR in VINSN_VEC and return TRUE if found.  Also check patterns from
3638    EXPR's history of changes.  */
3639 static bool
3640 vinsn_vec_has_expr_p (vinsn_vec_t vinsn_vec, expr_t expr)
3641 {
3642   vinsn_t vinsn, expr_vinsn;
3643   int n;
3644   unsigned i;
3645 
3646   /* Start with checking expr itself and then proceed with all the old forms
3647      of expr taken from its history vector.  */
3648   for (i = 0, expr_vinsn = EXPR_VINSN (expr);
3649        expr_vinsn;
3650        expr_vinsn = (i < VEC_length (expr_history_def,
3651 				     EXPR_HISTORY_OF_CHANGES (expr))
3652 		     ? VEC_index (expr_history_def,
3653 				  EXPR_HISTORY_OF_CHANGES (expr),
3654 				  i++)->old_expr_vinsn
3655 		     : NULL))
3656     FOR_EACH_VEC_ELT (vinsn_t, vinsn_vec, n, vinsn)
3657       if (VINSN_SEPARABLE_P (vinsn))
3658 	{
3659 	  if (vinsn_equal_p (vinsn, expr_vinsn))
3660 	    return true;
3661 	}
3662       else
3663 	{
3664 	  /* For non-separable instructions, the blocking insn can have
3665 	     another pattern due to substitution, and we can't choose
3666 	     a different register as in the above case.  Check all registers
3667 	     being written instead.  */
3668 	  if (bitmap_intersect_p (VINSN_REG_SETS (vinsn),
3669 				  VINSN_REG_SETS (expr_vinsn)))
3670 	    return true;
3671 	}
3672 
3673   return false;
3674 }
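
/* Editorial note: a standalone analogue of the iteration pattern above,
   which visits the current form first and then every old form from the
   history vector.  The plain array below stands in for
   EXPR_HISTORY_OF_CHANGES; all names are hypothetical.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int current = 42;
  int history[] = { 40, 41 };       /* Older forms of the value.  */
  unsigned len = 2, i = 0;

  /* Yield the live value first, then history[0..len-1], then stop.  */
  for (int *p = &current; p != NULL;
       p = (i < len ? &history[i++] : NULL))
    printf ("checking form %d\n", *p);  /* 42, 40, 41.  */
  return 0;
}
#endif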
3675 
3676 #ifdef ENABLE_CHECKING
3677 /* Return true if either of expressions from ORIG_OPS can be blocked
3678    by previously created bookkeeping code.  STATIC_PARAMS points to static
3679    parameters of move_op.  */
3680 static bool
3681 av_set_could_be_blocked_by_bookkeeping_p (av_set_t orig_ops, void *static_params)
3682 {
3683   expr_t expr;
3684   av_set_iterator iter;
3685   moveop_static_params_p sparams;
3686 
3687   /* This checks that expressions in ORIG_OPS are not blocked by bookkeeping
3688      created while scheduling on another fence.  */
3689   FOR_EACH_EXPR (expr, iter, orig_ops)
3690     if (vinsn_vec_has_expr_p (vec_bookkeeping_blocked_vinsns, expr))
3691       return true;
3692 
3693   gcc_assert (code_motion_path_driver_info == &move_op_hooks);
3694   sparams = (moveop_static_params_p) static_params;
3695 
3696   /* Expressions can also be blocked by bookkeeping created during the
3697      current move_op.  */
3698   if (bitmap_bit_p (current_copies, INSN_UID (sparams->failed_insn)))
3699     FOR_EACH_EXPR (expr, iter, orig_ops)
3700       if (moveup_expr_cached (expr, sparams->failed_insn, false) != MOVEUP_EXPR_NULL)
3701         return true;
3702 
3703   /* Expressions in ORIG_OPS may have wrong destination register due to
3704      renaming.  Check with the right register instead.  */
3705   if (sparams->dest && REG_P (sparams->dest))
3706     {
3707       rtx reg = sparams->dest;
3708       vinsn_t failed_vinsn = INSN_VINSN (sparams->failed_insn);
3709 
3710       if (register_unavailable_p (VINSN_REG_SETS (failed_vinsn), reg)
3711 	  || register_unavailable_p (VINSN_REG_USES (failed_vinsn), reg)
3712 	  || register_unavailable_p (VINSN_REG_CLOBBERS (failed_vinsn), reg))
3713 	return true;
3714     }
3715 
3716   return false;
3717 }
3718 #endif
3719 
3720 /* Clear VINSN_VEC and detach vinsns.  */
3721 static void
3722 vinsn_vec_clear (vinsn_vec_t *vinsn_vec)
3723 {
3724   unsigned len = VEC_length (vinsn_t, *vinsn_vec);
3725   if (len > 0)
3726     {
3727       vinsn_t vinsn;
3728       int n;
3729 
3730       FOR_EACH_VEC_ELT (vinsn_t, *vinsn_vec, n, vinsn)
3731         vinsn_detach (vinsn);
3732       VEC_block_remove (vinsn_t, *vinsn_vec, 0, len);
3733     }
3734 }
3735 
3736 /* Add the vinsn of EXPR to the VINSN_VEC.  */
3737 static void
3738 vinsn_vec_add (vinsn_vec_t *vinsn_vec, expr_t expr)
3739 {
3740   vinsn_attach (EXPR_VINSN (expr));
3741   VEC_safe_push (vinsn_t, heap, *vinsn_vec, EXPR_VINSN (expr));
3742 }
3743 
3744 /* Free the vector representing blocked expressions.  */
3745 static void
3746 vinsn_vec_free (vinsn_vec_t *vinsn_vec)
3747 {
3748   if (*vinsn_vec)
3749     VEC_free (vinsn_t, heap, *vinsn_vec);
3750 }
3751 
3752 /* Increase EXPR_PRIORITY_ADJ for INSN by AMOUNT.  */
3753 
3754 void sel_add_to_insn_priority (rtx insn, int amount)
3755 {
3756   EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) += amount;
3757 
3758   if (sched_verbose >= 2)
3759     sel_print ("sel_add_to_insn_priority: insn %d, by %d (now %d+%d).\n",
3760 	       INSN_UID (insn), amount, EXPR_PRIORITY (INSN_EXPR (insn)),
3761 	       EXPR_PRIORITY_ADJ (INSN_EXPR (insn)));
3762 }
3763 
3764 /* Turn AV into a vector, filter inappropriate insns and sort it.  Return
3765    true if there is something to schedule.  BNDS and FENCE are current
3766    boundaries and fence, respectively.  If we need to stall for some cycles
3767    before an expr from AV would become available, write this number to
3768    *PNEED_STALL.  */
3769 static bool
3770 fill_vec_av_set (av_set_t av, blist_t bnds, fence_t fence,
3771                  int *pneed_stall)
3772 {
3773   av_set_iterator si;
3774   expr_t expr;
3775   int sched_next_worked = 0, stalled, n;
3776   static int av_max_prio, est_ticks_till_branch;
3777   int min_need_stall = -1;
3778   deps_t dc = BND_DC (BLIST_BND (bnds));
3779 
3780   /* Bail out early when the ready list contained only USEs/CLOBBERs that are
3781      already scheduled.  */
3782   if (av == NULL)
3783     return false;
3784 
3785   /* Empty the vector left from the previous call.  */
3786   if (VEC_length (expr_t, vec_av_set) > 0)
3787     VEC_block_remove (expr_t, vec_av_set, 0, VEC_length (expr_t, vec_av_set));
3788 
3789   /* Turn the set into a vector for sorting and call sel_target_adjust_priority
3790      for each insn.  */
3791   gcc_assert (VEC_empty (expr_t, vec_av_set));
3792   FOR_EACH_EXPR (expr, si, av)
3793     {
3794       VEC_safe_push (expr_t, heap, vec_av_set, expr);
3795 
3796       gcc_assert (EXPR_PRIORITY_ADJ (expr) == 0 || *pneed_stall);
3797 
3798       /* Adjust priority using target backend hook.  */
3799       sel_target_adjust_priority (expr);
3800     }
3801 
3802   /* Sort the vector.  */
3803   VEC_qsort (expr_t, vec_av_set, sel_rank_for_schedule);
3804 
3805   /* We record the maximal priority of insns in the av set for the current
3806      instruction group.  */
3807   if (FENCE_STARTS_CYCLE_P (fence))
3808     av_max_prio = est_ticks_till_branch = INT_MIN;
3809 
3810   /* Filter out inappropriate expressions.  The loop runs in reverse to
3811      visit the "best" instructions first.  We assume that VEC_unordered_remove
3812      moves the last element into the place of the one being deleted.  */
3813   for (n = VEC_length (expr_t, vec_av_set) - 1, stalled = 0; n >= 0; n--)
3814     {
3815       expr_t expr = VEC_index (expr_t, vec_av_set, n);
3816       insn_t insn = EXPR_INSN_RTX (expr);
3817       signed char target_available;
3818       bool is_orig_reg_p = true;
3819       int need_cycles, new_prio;
3820 
3821       /* Don't allow any insns other than from SCHED_GROUP if we have one.  */
3822       if (FENCE_SCHED_NEXT (fence) && insn != FENCE_SCHED_NEXT (fence))
3823         {
3824           VEC_unordered_remove (expr_t, vec_av_set, n);
3825           continue;
3826         }
3827 
3828       /* Count the sched_next insns (just in case there
3829          could be several).  */
3830       if (FENCE_SCHED_NEXT (fence))
3831         sched_next_worked++;
3832 
3833       /* Check all liveness requirements and try renaming.
3834          FIXME: try to minimize calls to this.  */
3835       target_available = EXPR_TARGET_AVAILABLE (expr);
3836 
3837       /* If insn was already scheduled on the current fence,
3838 	 set TARGET_AVAILABLE to -1 no matter what expr's attribute says.  */
3839       if (vinsn_vec_has_expr_p (vec_target_unavailable_vinsns, expr))
3840 	target_available = -1;
3841 
3842       /* If the availability of the EXPR is invalidated by the insertion of
3843 	 bookkeeping earlier, make sure that we won't choose this expr for
3844 	 scheduling if it's not separable, and if it is separable, then
3845 	 we have to recompute the set of available registers for it.  */
3846       if (vinsn_vec_has_expr_p (vec_bookkeeping_blocked_vinsns, expr))
3847 	{
3848           VEC_unordered_remove (expr_t, vec_av_set, n);
3849           if (sched_verbose >= 4)
3850             sel_print ("Expr %d is blocked by bookkeeping inserted earlier\n",
3851                        INSN_UID (insn));
3852           continue;
3853         }
3854 
3855       if (target_available == true)
3856 	{
3857           /* Do nothing -- we can use an existing register.  */
3858 	  is_orig_reg_p = EXPR_SEPARABLE_P (expr);
3859         }
3860       else if (/* Non-separable instruction will never
3861                   get another register. */
3862                (target_available == false
3863                 && !EXPR_SEPARABLE_P (expr))
3864                /* Don't try to find a register for low-priority expression.  */
3865                || (int) VEC_length (expr_t, vec_av_set) - 1 - n >= max_insns_to_rename
3866                /* ??? FIXME: Don't try to rename data speculation.  */
3867                || (EXPR_SPEC_DONE_DS (expr) & BEGIN_DATA)
3868                || ! find_best_reg_for_expr (expr, bnds, &is_orig_reg_p))
3869         {
3870           VEC_unordered_remove (expr_t, vec_av_set, n);
3871           if (sched_verbose >= 4)
3872             sel_print ("Expr %d has no suitable target register\n",
3873                        INSN_UID (insn));
3874           continue;
3875         }
3876 
3877       /* Filter expressions that need to be renamed or speculated when
3878 	 pipelining, because compensating register copies or speculation
3879 	 checks are likely to be placed near the beginning of the loop,
3880 	 causing a stall.  */
3881       if (pipelining_p && EXPR_ORIG_SCHED_CYCLE (expr) > 0
3882 	  && (!is_orig_reg_p || EXPR_SPEC_DONE_DS (expr) != 0))
3883 	{
3884 	  /* Estimated number of cycles until the loop branch, needed for
3885 	     renaming/speculation to be successful.  */
3886 	  int need_n_ticks_till_branch = sel_vinsn_cost (EXPR_VINSN (expr));
3887 
3888 	  if ((int) current_loop_nest->ninsns < 9)
3889 	    {
3890 	      VEC_unordered_remove (expr_t, vec_av_set, n);
3891 	      if (sched_verbose >= 4)
3892 		sel_print ("Pipelining expr %d will likely cause stall\n",
3893 			   INSN_UID (insn));
3894 	      continue;
3895 	    }
3896 
3897 	  if ((int) current_loop_nest->ninsns - num_insns_scheduled
3898 	      < need_n_ticks_till_branch * issue_rate / 2
3899 	      && est_ticks_till_branch < need_n_ticks_till_branch)
3900 	     {
3901 	       VEC_unordered_remove (expr_t, vec_av_set, n);
3902 	       if (sched_verbose >= 4)
3903 		 sel_print ("Pipelining expr %d will likely cause stall\n",
3904 			    INSN_UID (insn));
3905 	       continue;
3906 	     }
3907 	}
3908 
3909       /* We want to schedule speculation checks as late as possible.  Discard
3910 	 them from av set if there are instructions with higher priority.  */
3911       if (sel_insn_is_speculation_check (insn)
3912 	  && EXPR_PRIORITY (expr) < av_max_prio)
3913 	{
3914           stalled++;
3915           min_need_stall = min_need_stall < 0 ? 1 : MIN (min_need_stall, 1);
3916           VEC_unordered_remove (expr_t, vec_av_set, n);
3917 	  if (sched_verbose >= 4)
3918 	    sel_print ("Delaying speculation check %d until its first use\n",
3919 		       INSN_UID (insn));
3920 	  continue;
3921 	}
3922 
3923       /* When updating AV_MAX_PRIO, ignore EXPRs available from pipelining.  */
3924       if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
3925 	av_max_prio = MAX (av_max_prio, EXPR_PRIORITY (expr));
3926 
3927       /* Don't allow any insns whose data is not yet ready.
3928          Check first whether we've already tried them and failed.  */
3929       if (INSN_UID (insn) < FENCE_READY_TICKS_SIZE (fence))
3930 	{
3931           need_cycles = (FENCE_READY_TICKS (fence)[INSN_UID (insn)]
3932 			 - FENCE_CYCLE (fence));
3933 	  if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
3934 	    est_ticks_till_branch = MAX (est_ticks_till_branch,
3935 					 EXPR_PRIORITY (expr) + need_cycles);
3936 
3937 	  if (need_cycles > 0)
3938 	    {
3939 	      stalled++;
3940 	      min_need_stall = (min_need_stall < 0
3941 				? need_cycles
3942 				: MIN (min_need_stall, need_cycles));
3943 	      VEC_unordered_remove (expr_t, vec_av_set, n);
3944 
3945 	      if (sched_verbose >= 4)
3946 		sel_print ("Expr %d is not ready until cycle %d (cached)\n",
3947 			   INSN_UID (insn),
3948 			   FENCE_READY_TICKS (fence)[INSN_UID (insn)]);
3949 	      continue;
3950 	    }
3951 	}
3952 
3953       /* Now resort to dependence analysis to find whether EXPR might be
3954          stalled due to dependencies from FENCE's context.  */
3955       need_cycles = tick_check_p (expr, dc, fence);
3956       new_prio = EXPR_PRIORITY (expr) + EXPR_PRIORITY_ADJ (expr) + need_cycles;
3957 
3958       if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
3959 	est_ticks_till_branch = MAX (est_ticks_till_branch,
3960 				     new_prio);
3961 
3962       if (need_cycles > 0)
3963         {
3964           if (INSN_UID (insn) >= FENCE_READY_TICKS_SIZE (fence))
3965             {
3966               int new_size = INSN_UID (insn) * 3 / 2;
3967 
3968               FENCE_READY_TICKS (fence)
3969                 = (int *) xrecalloc (FENCE_READY_TICKS (fence),
3970                                      new_size, FENCE_READY_TICKS_SIZE (fence),
3971                                      sizeof (int));
3972             }
3973           FENCE_READY_TICKS (fence)[INSN_UID (insn)]
3974             = FENCE_CYCLE (fence) + need_cycles;
3975 
3976           stalled++;
3977           min_need_stall = (min_need_stall < 0
3978                             ? need_cycles
3979                             : MIN (min_need_stall, need_cycles));
3980 
3981           VEC_unordered_remove (expr_t, vec_av_set, n);
3982 
3983           if (sched_verbose >= 4)
3984             sel_print ("Expr %d is not ready yet until cycle %d\n",
3985                        INSN_UID (insn),
3986                        FENCE_READY_TICKS (fence)[INSN_UID (insn)]);
3987           continue;
3988         }
3989 
3990       if (sched_verbose >= 4)
3991         sel_print ("Expr %d is ok\n", INSN_UID (insn));
3992       min_need_stall = 0;
3993     }
3994 
3995   /* Clear SCHED_NEXT.  */
3996   if (FENCE_SCHED_NEXT (fence))
3997     {
3998       gcc_assert (sched_next_worked == 1);
3999       FENCE_SCHED_NEXT (fence) = NULL_RTX;
4000     }
4001 
4002   /* No need to stall if this variable was not initialized.  */
4003   if (min_need_stall < 0)
4004     min_need_stall = 0;
4005 
4006   if (VEC_empty (expr_t, vec_av_set))
4007     {
4008       /* We need to set *pneed_stall here, because later we skip this code
4009          when ready list is empty.  */
4010       *pneed_stall = min_need_stall;
4011       return false;
4012     }
4013   else
4014     gcc_assert (min_need_stall == 0);
4015 
4016   /* Sort the vector.  */
4017   VEC_qsort (expr_t, vec_av_set, sel_rank_for_schedule);
4018 
4019   if (sched_verbose >= 4)
4020     {
4021       sel_print ("Total ready exprs: %d, stalled: %d\n",
4022                  VEC_length (expr_t, vec_av_set), stalled);
4023       sel_print ("Sorted av set (%d): ", VEC_length (expr_t, vec_av_set));
4024       FOR_EACH_VEC_ELT (expr_t, vec_av_set, n, expr)
4025         dump_expr (expr);
4026       sel_print ("\n");
4027     }
4028 
4029   *pneed_stall = 0;
4030   return true;
4031 }
4032 
4033 /* Convert a vectored and sorted av set to the ready list that
4034    the rest of the backend wants to see.  */
4035 static void
4036 convert_vec_av_set_to_ready (void)
4037 {
4038   int n;
4039   expr_t expr;
4040 
4041   /* Allocate and fill the ready list from the sorted vector.  */
4042   ready.n_ready = VEC_length (expr_t, vec_av_set);
4043   ready.first = ready.n_ready - 1;
4044 
4045   gcc_assert (ready.n_ready > 0);
4046 
4047   if (ready.n_ready > max_issue_size)
4048     {
4049       max_issue_size = ready.n_ready;
4050       sched_extend_ready_list (ready.n_ready);
4051     }
4052 
4053   FOR_EACH_VEC_ELT (expr_t, vec_av_set, n, expr)
4054     {
4055       vinsn_t vi = EXPR_VINSN (expr);
4056       insn_t insn = VINSN_INSN_RTX (vi);
4057 
4058       ready_try[n] = 0;
4059       ready.vec[n] = insn;
4060     }
4061 }
4062 
4063 /* Initialize ready list from *AV_PTR for the max_issue () call.
4064    If any unrecognizable insn found in *AV_PTR, return it (and skip
4065    max_issue).  BND and FENCE are current boundary and fence,
4066    respectively.  If we need to stall for some cycles before an expr
4067    from *AV_PTR would become available, write this number to *PNEED_STALL.  */
4068 static expr_t
4069 fill_ready_list (av_set_t *av_ptr, blist_t bnds, fence_t fence,
4070                  int *pneed_stall)
4071 {
4072   expr_t expr;
4073 
4074   /* We do not support multiple boundaries per fence.  */
4075   gcc_assert (BLIST_NEXT (bnds) == NULL);
4076 
4077   /* Process expressions that require special handling, i.e.  pipelined,
4078      speculative and recog() < 0 expressions first.  */
4079   process_pipelined_exprs (av_ptr);
4080   process_spec_exprs (av_ptr);
4081 
4082   /* A USE could be scheduled immediately.  */
4083   expr = process_use_exprs (av_ptr);
4084   if (expr)
4085     {
4086       *pneed_stall = 0;
4087       return expr;
4088     }
4089 
4090   /* Turn the av set to a vector for sorting.  */
4091   if (! fill_vec_av_set (*av_ptr, bnds, fence, pneed_stall))
4092     {
4093       ready.n_ready = 0;
4094       return NULL;
4095     }
4096 
4097   /* Build the final ready list.  */
4098   convert_vec_av_set_to_ready ();
4099   return NULL;
4100 }
4101 
4102 /* Wrapper for dfa_new_cycle ().  Returns TRUE if cycle was advanced.  */
4103 static bool
4104 sel_dfa_new_cycle (insn_t insn, fence_t fence)
4105 {
4106   int last_scheduled_cycle = FENCE_LAST_SCHEDULED_INSN (fence)
4107                              ? INSN_SCHED_CYCLE (FENCE_LAST_SCHEDULED_INSN (fence))
4108                              : FENCE_CYCLE (fence) - 1;
4109   bool res = false;
4110   int sort_p = 0;
4111 
4112   if (!targetm.sched.dfa_new_cycle)
4113     return false;
4114 
4115   memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);
4116 
4117   while (!sort_p && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
4118                                                  insn, last_scheduled_cycle,
4119                                                  FENCE_CYCLE (fence), &sort_p))
4120     {
4121       memcpy (FENCE_STATE (fence), curr_state, dfa_state_size);
4122       advance_one_cycle (fence);
4123       memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);
4124       res = true;
4125     }
4126 
4127   return res;
4128 }
4129 
4130 /* Invoke reorder* target hooks on the ready list.  Return the number of insns
4131    we can issue.  FENCE is the current fence.  */
4132 static int
4133 invoke_reorder_hooks (fence_t fence)
4134 {
4135   int issue_more;
4136   bool ran_hook = false;
4137 
4138   /* Call the reorder hook at the beginning of the cycle, and call
4139      the reorder2 hook in the middle of the cycle.  */
4140   if (FENCE_ISSUED_INSNS (fence) == 0)
4141     {
4142       if (targetm.sched.reorder
4143           && !SCHED_GROUP_P (ready_element (&ready, 0))
4144           && ready.n_ready > 1)
4145         {
4146           /* Don't give reorder the most prioritized insn as it can break
4147              pipelining.  */
4148           if (pipelining_p)
4149             --ready.n_ready;
4150 
4151           issue_more
4152             = targetm.sched.reorder (sched_dump, sched_verbose,
4153                                      ready_lastpos (&ready),
4154                                      &ready.n_ready, FENCE_CYCLE (fence));
4155 
4156           if (pipelining_p)
4157             ++ready.n_ready;
4158 
4159           ran_hook = true;
4160         }
4161       else
4162         /* Initialize can_issue_more for variable_issue.  */
4163         issue_more = issue_rate;
4164     }
4165   else if (targetm.sched.reorder2
4166            && !SCHED_GROUP_P (ready_element (&ready, 0)))
4167     {
4168       if (ready.n_ready == 1)
4169         issue_more =
4170           targetm.sched.reorder2 (sched_dump, sched_verbose,
4171                                   ready_lastpos (&ready),
4172                                   &ready.n_ready, FENCE_CYCLE (fence));
4173       else
4174         {
4175           if (pipelining_p)
4176             --ready.n_ready;
4177 
4178           issue_more =
4179             targetm.sched.reorder2 (sched_dump, sched_verbose,
4180                                     ready.n_ready
4181                                     ? ready_lastpos (&ready) : NULL,
4182                                     &ready.n_ready, FENCE_CYCLE (fence));
4183 
4184           if (pipelining_p)
4185             ++ready.n_ready;
4186         }
4187 
4188       ran_hook = true;
4189     }
4190   else
4191     issue_more = FENCE_ISSUE_MORE (fence);
4192 
4193   /* Ensure that ready list and vec_av_set are in line with each other,
4194      i.e. vec_av_set[i] == ready_element (&ready, i).  */
4195   if (issue_more && ran_hook)
4196     {
4197       int i, j, n;
4198       rtx *arr = ready.vec;
4199       expr_t *vec = VEC_address (expr_t, vec_av_set);
4200 
4201       for (i = 0, n = ready.n_ready; i < n; i++)
4202         if (EXPR_INSN_RTX (vec[i]) != arr[i])
4203           {
4204             expr_t tmp;
4205 
4206             for (j = i; j < n; j++)
4207               if (EXPR_INSN_RTX (vec[j]) == arr[i])
4208                 break;
4209             gcc_assert (j < n);
4210 
4211             tmp = vec[i];
4212             vec[i] = vec[j];
4213             vec[j] = tmp;
4214           }
4215     }
4216 
4217   return issue_more;
4218 }
4219 
4220 /* Return an EXPR corresponding to INDEX element of ready list, if
4221    FOLLOW_READY_ELEMENT is true (i.e., an expr of
4222    ready_element (&ready, INDEX) will be returned), and to INDEX element of
4223    ready.vec otherwise.  */
4224 static inline expr_t
4225 find_expr_for_ready (int index, bool follow_ready_element)
4226 {
4227   expr_t expr;
4228   int real_index;
4229 
4230   real_index = follow_ready_element ? ready.first - index : index;
4231 
4232   expr = VEC_index (expr_t, vec_av_set, real_index);
4233   gcc_assert (ready.vec[real_index] == EXPR_INSN_RTX (expr));
4234 
4235   return expr;
4236 }
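
/* Editorial note: the index mapping of find_expr_for_ready, standalone.
   convert_vec_av_set_to_ready fills ready.vec[n] from vec_av_set[n] and
   sets ready.first = n_ready - 1, while (per the comment and assert above)
   ready_element (&ready, INDEX) corresponds to ready.vec[first - INDEX];
   hence the expr for the I-th ready element lives at index first - i.
   The array and values below are hypothetical.  */
#if 0
#include <assert.h>

int
main (void)
{
  int vec[] = { 30, 20, 10 };   /* Stands in for the parallel vec_av_set.  */
  int n_ready = 3;
  int first = n_ready - 1;      /* As set up in convert_vec_av_set_to_ready.  */
  int index;

  /* ready_element (&ready, INDEX) corresponds to vec[first - INDEX].  */
  for (index = 0; index < n_ready; index++)
    assert (vec[first - index] == 10 * (index + 1));
  return 0;
}
#endif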
4237 
4238 /* Calculate insns worth trying via lookahead_guard hook.  Return the number
4239    of such insns found.  */
4240 static int
4241 invoke_dfa_lookahead_guard (void)
4242 {
4243   int i, n;
4244   bool have_hook
4245     = targetm.sched.first_cycle_multipass_dfa_lookahead_guard != NULL;
4246 
4247   if (sched_verbose >= 2)
4248     sel_print ("ready after reorder: ");
4249 
4250   for (i = 0, n = 0; i < ready.n_ready; i++)
4251     {
4252       expr_t expr;
4253       insn_t insn;
4254       int r;
4255 
4256       /* In this loop insn is Ith element of the ready list given by
4257          ready_element, not Ith element of ready.vec.  */
4258       insn = ready_element (&ready, i);
4259 
4260       if (! have_hook || i == 0)
4261         r = 0;
4262       else
4263         r = !targetm.sched.first_cycle_multipass_dfa_lookahead_guard (insn);
4264 
4265       gcc_assert (INSN_CODE (insn) >= 0);
4266 
4267       /* Only insns with ready_try = 0 can get here
4268          from fill_ready_list.  */
4269       gcc_assert (ready_try [i] == 0);
4270       ready_try[i] = r;
4271       if (!r)
4272         n++;
4273 
4274       expr = find_expr_for_ready (i, true);
4275 
4276       if (sched_verbose >= 2)
4277         {
4278           dump_vinsn (EXPR_VINSN (expr));
4279           sel_print (":%d; ", ready_try[i]);
4280         }
4281     }
4282 
4283   if (sched_verbose >= 2)
4284     sel_print ("\n");
4285   return n;
4286 }
4287 
4288 /* Calculate the number of privileged insns and return it.  */
4289 static int
4290 calculate_privileged_insns (void)
4291 {
4292   expr_t cur_expr, min_spec_expr = NULL;
4293   int privileged_n = 0, i;
4294 
4295   for (i = 0; i < ready.n_ready; i++)
4296     {
4297       if (ready_try[i])
4298         continue;
4299 
4300       if (! min_spec_expr)
4301 	min_spec_expr = find_expr_for_ready (i, true);
4302 
4303       cur_expr = find_expr_for_ready (i, true);
4304 
4305       if (EXPR_SPEC (cur_expr) > EXPR_SPEC (min_spec_expr))
4306         break;
4307 
4308       ++privileged_n;
4309     }
4310 
4311   if (i == ready.n_ready)
4312     privileged_n = 0;
4313 
4314   if (sched_verbose >= 2)
4315     sel_print ("privileged_n: %d insns with SPEC %d\n",
4316                privileged_n, privileged_n ? EXPR_SPEC (min_spec_expr) : -1);
4317   return privileged_n;
4318 }
4319 
4320 /* Call the rest of the hooks after the choice was made.  Return
4321    the number of insns that still can be issued given that the current
4322    number is ISSUE_MORE.  FENCE and BEST_INSN are the current fence
4323    and the insn chosen for scheduling, respectively.  */
4324 static int
4325 invoke_aftermath_hooks (fence_t fence, rtx best_insn, int issue_more)
4326 {
4327   gcc_assert (INSN_P (best_insn));
4328 
4329   /* First, call dfa_new_cycle, and then variable_issue, if available.  */
4330   sel_dfa_new_cycle (best_insn, fence);
4331 
4332   if (targetm.sched.variable_issue)
4333     {
4334       memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);
4335       issue_more =
4336         targetm.sched.variable_issue (sched_dump, sched_verbose, best_insn,
4337                                       issue_more);
4338       memcpy (FENCE_STATE (fence), curr_state, dfa_state_size);
4339     }
4340   else if (GET_CODE (PATTERN (best_insn)) != USE
4341            && GET_CODE (PATTERN (best_insn)) != CLOBBER)
4342     issue_more--;
4343 
4344   return issue_more;
4345 }
4346 
4347 /* Estimate the cost of issuing INSN on DFA state STATE.  */
4348 static int
4349 estimate_insn_cost (rtx insn, state_t state)
4350 {
4351   static state_t temp = NULL;
4352   int cost;
4353 
4354   if (!temp)
4355     temp = xmalloc (dfa_state_size);
4356 
4357   memcpy (temp, state, dfa_state_size);
4358   cost = state_transition (temp, insn);
4359 
4360   if (cost < 0)
4361     return 0;
4362   else if (cost == 0)
4363     return 1;
4364   return cost;
4365 }
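
/* Editorial note: a standalone restatement of the return-value mapping
   implemented above: a negative state_transition result is treated as
   "fits into the current cycle" (cost 0), zero as "next cycle" (cost 1),
   and a positive value as the number of cycles to wait.  The stub below is
   hypothetical and only mirrors that mapping, not the DFA itself.  */
#if 0
#include <assert.h>

static int
normalize_dfa_cost (int raw)
{
  if (raw < 0)
    return 0;   /* Fits into the current cycle.  */
  else if (raw == 0)
    return 1;   /* Issue on the next cycle.  */
  return raw;   /* Stall for RAW cycles.  */
}

int
main (void)
{
  assert (normalize_dfa_cost (-1) == 0);
  assert (normalize_dfa_cost (0) == 1);
  assert (normalize_dfa_cost (3) == 3);
  return 0;
}
#endif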
4366 
4367 /* Return the cost of issuing EXPR on the FENCE as estimated by DFA.
4368    This function properly handles ASMs, USEs etc.  */
4369 static int
4370 get_expr_cost (expr_t expr, fence_t fence)
4371 {
4372   rtx insn = EXPR_INSN_RTX (expr);
4373 
4374   if (recog_memoized (insn) < 0)
4375     {
4376       if (!FENCE_STARTS_CYCLE_P (fence)
4377 	  && INSN_ASM_P (insn))
4378 	/* This is an asm insn that we tried to issue on a cycle
4379 	   other than the first.  Issue it on the next cycle.  */
4380 	return 1;
4381       else
4382 	/* A USE insn, or something else we don't need to
4383 	   understand.  We can't pass these directly to
4384 	   state_transition because it will trigger a
4385 	   fatal error for unrecognizable insns.  */
4386 	return 0;
4387     }
4388   else
4389     return estimate_insn_cost (insn, FENCE_STATE (fence));
4390 }
4391 
4392 /* Find the best insn for scheduling, either via max_issue or by just
4393    taking the most prioritized one available.  */
4394 static int
4395 choose_best_insn (fence_t fence, int privileged_n, int *index)
4396 {
4397   int can_issue = 0;
4398 
4399   if (dfa_lookahead > 0)
4400     {
4401       cycle_issued_insns = FENCE_ISSUED_INSNS (fence);
4402       /* TODO: pass equivalent of first_cycle_insn_p to max_issue ().  */
4403       can_issue = max_issue (&ready, privileged_n,
4404                              FENCE_STATE (fence), true, index);
4405       if (sched_verbose >= 2)
4406         sel_print ("max_issue: we can issue %d insns, already did %d insns\n",
4407                    can_issue, FENCE_ISSUED_INSNS (fence));
4408     }
4409   else
4410     {
4411       /* We can't use max_issue; just return the first available element.  */
4412       int i;
4413 
4414       for (i = 0; i < ready.n_ready; i++)
4415 	{
4416 	  expr_t expr = find_expr_for_ready (i, true);
4417 
4418 	  if (get_expr_cost (expr, fence) < 1)
4419 	    {
4420 	      can_issue = can_issue_more;
4421 	      *index = i;
4422 
4423 	      if (sched_verbose >= 2)
4424 		sel_print ("using %dth insn from the ready list\n", i + 1);
4425 
4426 	      break;
4427 	    }
4428 	}
4429 
4430       if (i == ready.n_ready)
4431 	{
4432 	  can_issue = 0;
4433 	  *index = -1;
4434 	}
4435     }
4436 
4437   return can_issue;
4438 }
4439 
4440 /* Choose the best expr from *AV_VLIW_PTR and a suitable register for it.
4441    BNDS and FENCE are current boundaries and scheduling fence respectively.
4442    Return the expr found, or NULL if nothing can be issued at the moment.
4443    Write to PNEED_STALL the number of cycles to stall if no expr was found.  */
4444 static expr_t
4445 find_best_expr (av_set_t *av_vliw_ptr, blist_t bnds, fence_t fence,
4446                 int *pneed_stall)
4447 {
4448   expr_t best;
4449 
4450   /* Choose the best insn for scheduling via:
4451      1) sorting the ready list based on priority;
4452      2) calling the reorder hook;
4453      3) calling max_issue.  */
4454   best = fill_ready_list (av_vliw_ptr, bnds, fence, pneed_stall);
4455   if (best == NULL && ready.n_ready > 0)
4456     {
4457       int privileged_n, index;
4458 
4459       can_issue_more = invoke_reorder_hooks (fence);
4460       if (can_issue_more > 0)
4461         {
4462           /* Try choosing the best insn until we find one that can be
4463              scheduled despite liveness restrictions on its destination register.
4464              In the future, we'd like to choose once and then just probe insns
4465              in the order of their priority.  */
4466           invoke_dfa_lookahead_guard ();
4467           privileged_n = calculate_privileged_insns ();
4468           can_issue_more = choose_best_insn (fence, privileged_n, &index);
4469           if (can_issue_more)
4470             best = find_expr_for_ready (index, true);
4471         }
4472       /* We had some available insns, so if we can't issue them,
4473          we have a stall.  */
4474       if (can_issue_more == 0)
4475         {
4476           best = NULL;
4477           *pneed_stall = 1;
4478         }
4479     }
4480 
4481   if (best != NULL)
4482     {
4483       can_issue_more = invoke_aftermath_hooks (fence, EXPR_INSN_RTX (best),
4484                                                can_issue_more);
4485       if (targetm.sched.variable_issue
4486 	  && can_issue_more == 0)
4487         *pneed_stall = 1;
4488     }
4489 
4490   if (sched_verbose >= 2)
4491     {
4492       if (best != NULL)
4493         {
4494           sel_print ("Best expression (vliw form): ");
4495           dump_expr (best);
4496           sel_print ("; cycle %d\n", FENCE_CYCLE (fence));
4497         }
4498       else
4499         sel_print ("No best expr found!\n");
4500     }
4501 
4502   return best;
4503 }
4504 
4505 
4506 /* Functions that implement the core of the scheduler.  */
4507 
4508 
4509 /* Emit an instruction from EXPR with SEQNO and VINSN after
4510    PLACE_TO_INSERT.  */
4511 static insn_t
4512 emit_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
4513                            insn_t place_to_insert)
4514 {
4515   /* This assert fails when we have identical instructions
4516      one of which dominates the other.  In this case move_op ()
4517      finds the first instruction and doesn't search for second one.
4518      The solution would be to compute av_set after the first found
4519      insn and, if insn present in that set, continue searching.
4520      For now we work around this issue in move_op.  */
4521   gcc_assert (!INSN_IN_STREAM_P (EXPR_INSN_RTX (expr)));
4522 
4523   if (EXPR_WAS_RENAMED (expr))
4524     {
4525       unsigned regno = expr_dest_regno (expr);
4526 
4527       if (HARD_REGISTER_NUM_P (regno))
4528 	{
4529 	  df_set_regs_ever_live (regno, true);
4530 	  reg_rename_tick[regno] = ++reg_rename_this_tick;
4531 	}
4532     }
4533 
4534   return sel_gen_insn_from_expr_after (expr, vinsn, seqno,
4535                                        place_to_insert);
4536 }
4537 
4538 /* Return TRUE if BB can hold bookkeeping code.  */
4539 static bool
4540 block_valid_for_bookkeeping_p (basic_block bb)
4541 {
4542   insn_t bb_end = BB_END (bb);
4543 
4544   if (!in_current_region_p (bb) || EDGE_COUNT (bb->succs) > 1)
4545     return false;
4546 
4547   if (INSN_P (bb_end))
4548     {
4549       if (INSN_SCHED_TIMES (bb_end) > 0)
4550 	return false;
4551     }
4552   else
4553     gcc_assert (NOTE_INSN_BASIC_BLOCK_P (bb_end));
4554 
4555   return true;
4556 }
4557 
4558 /* Attempt to find a block that can hold bookkeeping code for path(s) incoming
4559    into E2->dest, except from E1->src (there may be a sequence of empty basic
4560    blocks between E1->src and E2->dest).  Return found block, or NULL if new
4561    one must be created.  If LAX holds, don't assume there is a simple path
4562    from E1->src to E2->dest.  */
4563 static basic_block
4564 find_block_for_bookkeeping (edge e1, edge e2, bool lax)
4565 {
4566   basic_block candidate_block = NULL;
4567   edge e;
4568 
4569   /* Loop over edges from E1 to E2, inclusive.  */
4570   for (e = e1; !lax || e->dest != EXIT_BLOCK_PTR; e = EDGE_SUCC (e->dest, 0))
4571     {
4572       if (EDGE_COUNT (e->dest->preds) == 2)
4573 	{
4574 	  if (candidate_block == NULL)
4575 	    candidate_block = (EDGE_PRED (e->dest, 0) == e
4576 			       ? EDGE_PRED (e->dest, 1)->src
4577 			       : EDGE_PRED (e->dest, 0)->src);
4578 	  else
4579 	    /* Found an additional edge leading into the path from e1 to e2
4580 	       from the side.  */
4581 	    return NULL;
4582 	}
4583       else if (EDGE_COUNT (e->dest->preds) > 2)
4584 	/* Several edges leading into the path from e1 to e2 from the side.  */
4585 	return NULL;
4586 
4587       if (e == e2)
4588 	return ((!lax || candidate_block)
4589 		&& block_valid_for_bookkeeping_p (candidate_block)
4590 		? candidate_block
4591 		: NULL);
4592 
4593       if (lax && EDGE_COUNT (e->dest->succs) != 1)
4594 	return NULL;
4595     }
4596 
4597   if (lax)
4598     return NULL;
4599 
4600   gcc_unreachable ();
4601 }
4602 
4603 /* Create new basic block for bookkeeping code for path(s) incoming into
4604    E2->dest, except from E1->src.  Return created block.  */
4605 static basic_block
4606 create_block_for_bookkeeping (edge e1, edge e2)
4607 {
4608   basic_block new_bb, bb = e2->dest;
4609 
4610   /* Check that we don't spoil the loop structure.  */
4611   if (current_loop_nest)
4612     {
4613       basic_block latch = current_loop_nest->latch;
4614 
4615       /* We do not split header.  */
4616       gcc_assert (e2->dest != current_loop_nest->header);
4617 
4618       /* We do not redirect the only edge to the latch block.  */
4619       gcc_assert (e1->dest != latch
4620 		  || !single_pred_p (latch)
4621 		  || e1 != single_pred_edge (latch));
4622     }
4623 
4624   /* Split BB to insert BOOK_INSN there.  */
4625   new_bb = sched_split_block (bb, NULL);
4626 
4627   /* Move note_list from the upper bb.  */
4628   gcc_assert (BB_NOTE_LIST (new_bb) == NULL_RTX);
4629   BB_NOTE_LIST (new_bb) = BB_NOTE_LIST (bb);
4630   BB_NOTE_LIST (bb) = NULL_RTX;
4631 
4632   gcc_assert (e2->dest == bb);
4633 
4634   /* Skip block for bookkeeping copy when leaving E1->src.  */
4635   if (e1->flags & EDGE_FALLTHRU)
4636     sel_redirect_edge_and_branch_force (e1, new_bb);
4637   else
4638     sel_redirect_edge_and_branch (e1, new_bb);
4639 
4640   gcc_assert (e1->dest == new_bb);
4641   gcc_assert (sel_bb_empty_p (bb));
4642 
4643   /* To keep basic block numbers in sync between debug and non-debug
4644      compilations, we have to rotate blocks here.  Consider that we
4645      started from (a,b)->d, (c,d)->e, and d contained only debug
4646      insns.  It would have been removed before if the debug insns
4647      weren't there, so we'd have split e rather than d.  So what we do
4648      now is to swap the block numbers of new_bb and
4649      single_succ(new_bb) == e, so that the insns that were in e before
4650      get the new block number.  */
4651 
4652   if (MAY_HAVE_DEBUG_INSNS)
4653     {
4654       basic_block succ;
4655       insn_t insn = sel_bb_head (new_bb);
4656       insn_t last;
4657 
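      /* Only swap the block numbers if NEW_BB consists entirely of debug
	 insns and notes, i.e. if it would not have existed in a non-debug
	 compilation.  */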
4658       if (DEBUG_INSN_P (insn)
4659 	  && single_succ_p (new_bb)
4660 	  && (succ = single_succ (new_bb))
4661 	  && succ != EXIT_BLOCK_PTR
4662 	  && DEBUG_INSN_P ((last = sel_bb_end (new_bb))))
4663 	{
4664 	  while (insn != last && (DEBUG_INSN_P (insn) || NOTE_P (insn)))
4665 	    insn = NEXT_INSN (insn);
4666 
4667 	  if (insn == last)
4668 	    {
4669 	      sel_global_bb_info_def gbi;
4670 	      sel_region_bb_info_def rbi;
4671 	      int i;
4672 
4673 	      if (sched_verbose >= 2)
4674 		sel_print ("Swapping block ids %i and %i\n",
4675 			   new_bb->index, succ->index);
4676 
4677 	      i = new_bb->index;
4678 	      new_bb->index = succ->index;
4679 	      succ->index = i;
4680 
4681 	      SET_BASIC_BLOCK (new_bb->index, new_bb);
4682 	      SET_BASIC_BLOCK (succ->index, succ);
4683 
4684 	      memcpy (&gbi, SEL_GLOBAL_BB_INFO (new_bb), sizeof (gbi));
4685 	      memcpy (SEL_GLOBAL_BB_INFO (new_bb), SEL_GLOBAL_BB_INFO (succ),
4686 		      sizeof (gbi));
4687 	      memcpy (SEL_GLOBAL_BB_INFO (succ), &gbi, sizeof (gbi));
4688 
4689 	      memcpy (&rbi, SEL_REGION_BB_INFO (new_bb), sizeof (rbi));
4690 	      memcpy (SEL_REGION_BB_INFO (new_bb), SEL_REGION_BB_INFO (succ),
4691 		      sizeof (rbi));
4692 	      memcpy (SEL_REGION_BB_INFO (succ), &rbi, sizeof (rbi));
4693 
4694 	      i = BLOCK_TO_BB (new_bb->index);
4695 	      BLOCK_TO_BB (new_bb->index) = BLOCK_TO_BB (succ->index);
4696 	      BLOCK_TO_BB (succ->index) = i;
4697 
4698 	      i = CONTAINING_RGN (new_bb->index);
4699 	      CONTAINING_RGN (new_bb->index) = CONTAINING_RGN (succ->index);
4700 	      CONTAINING_RGN (succ->index) = i;
4701 
4702 	      for (i = 0; i < current_nr_blocks; i++)
4703 		if (BB_TO_BLOCK (i) == succ->index)
4704 		  BB_TO_BLOCK (i) = new_bb->index;
4705 		else if (BB_TO_BLOCK (i) == new_bb->index)
4706 		  BB_TO_BLOCK (i) = succ->index;
4707 
4708 	      FOR_BB_INSNS (new_bb, insn)
4709 		if (INSN_P (insn))
4710 		  EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;
4711 
4712 	      FOR_BB_INSNS (succ, insn)
4713 		if (INSN_P (insn))
4714 		  EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = succ->index;
4715 
4716 	      if (bitmap_clear_bit (code_motion_visited_blocks, new_bb->index))
4717 		bitmap_set_bit (code_motion_visited_blocks, succ->index);
4718 
4719 	      gcc_assert (LABEL_P (BB_HEAD (new_bb))
4720 			  && LABEL_P (BB_HEAD (succ)));
4721 
4722 	      if (sched_verbose >= 4)
4723 		sel_print ("Swapping code labels %i and %i\n",
4724 			   CODE_LABEL_NUMBER (BB_HEAD (new_bb)),
4725 			   CODE_LABEL_NUMBER (BB_HEAD (succ)));
4726 
4727 	      i = CODE_LABEL_NUMBER (BB_HEAD (new_bb));
4728 	      CODE_LABEL_NUMBER (BB_HEAD (new_bb))
4729 		= CODE_LABEL_NUMBER (BB_HEAD (succ));
4730 	      CODE_LABEL_NUMBER (BB_HEAD (succ)) = i;
4731 	    }
4732 	}
4733     }
4734 
4735   return bb;
4736 }
4737 
4738 /* Return insn after which we must insert bookkeeping code for path(s) incoming
4739    into E2->dest, except from E1->src.  If the returned insn immediately
4740    precedes a fence, assign that fence to *FENCE_TO_REWIND.  */
4741 static insn_t
4742 find_place_for_bookkeeping (edge e1, edge e2, fence_t *fence_to_rewind)
4743 {
4744   insn_t place_to_insert;
4745   /* Find a basic block that can hold bookkeeping.  If it can be found, do not
4746      create a new basic block, but insert the bookkeeping there.  */
4747   basic_block book_block = find_block_for_bookkeeping (e1, e2, FALSE);
4748 
4749   if (book_block)
4750     {
4751       place_to_insert = BB_END (book_block);
4752 
4753       /* Don't use a block containing only debug insns for
4754 	 bookkeeping; this causes scheduling differences between debug
4755 	 and non-debug compilations, for the block would have been
4756 	 removed already.  */
4757       if (DEBUG_INSN_P (place_to_insert))
4758 	{
4759 	  rtx insn = sel_bb_head (book_block);
4760 
4761 	  while (insn != place_to_insert &&
4762 		 (DEBUG_INSN_P (insn) || NOTE_P (insn)))
4763 	    insn = NEXT_INSN (insn);
4764 
4765 	  if (insn == place_to_insert)
4766 	    book_block = NULL;
4767 	}
4768     }
4769 
4770   if (!book_block)
4771     {
4772       book_block = create_block_for_bookkeeping (e1, e2);
4773       place_to_insert = BB_END (book_block);
4774       if (sched_verbose >= 9)
4775 	sel_print ("New block is %i, split from bookkeeping block %i\n",
4776 		   EDGE_SUCC (book_block, 0)->dest->index, book_block->index);
4777     }
4778   else
4779     {
4780       if (sched_verbose >= 9)
4781 	sel_print ("Pre-existing bookkeeping block is %i\n", book_block->index);
4782     }
4783 
4784   *fence_to_rewind = NULL;
4785   /* If basic block ends with a jump, insert bookkeeping code right before it.
4786      Notice if we are crossing a fence when taking PREV_INSN.  */
4787   if (INSN_P (place_to_insert) && control_flow_insn_p (place_to_insert))
4788     {
4789       *fence_to_rewind = flist_lookup (fences, place_to_insert);
4790       place_to_insert = PREV_INSN (place_to_insert);
4791     }
4792 
4793   return place_to_insert;
4794 }
4795 
4796 /* Find a proper seqno for the bookkeeping insn inserted at PLACE_TO_INSERT
4797    for JOIN_POINT.  */
4798 static int
4799 find_seqno_for_bookkeeping (insn_t place_to_insert, insn_t join_point)
4800 {
4801   int seqno;
4802   rtx next;
4803 
4804   /* Check if we are about to insert a bookkeeping copy before a jump, and
4805      use the jump's seqno for the copy; otherwise, use JOIN_POINT's seqno.  */
4806   next = NEXT_INSN (place_to_insert);
4807   if (INSN_P (next)
4808       && JUMP_P (next)
4809       && BLOCK_FOR_INSN (next) == BLOCK_FOR_INSN (place_to_insert))
4810     {
4811       gcc_assert (INSN_SCHED_TIMES (next) == 0);
4812       seqno = INSN_SEQNO (next);
4813     }
4814   else if (INSN_SEQNO (join_point) > 0)
4815     seqno = INSN_SEQNO (join_point);
4816   else
4817     {
4818       seqno = get_seqno_by_preds (place_to_insert);
4819 
4820       /* Sometimes the fences can move in such a way that there will be
4821          no instructions with positive seqno around this bookkeeping.
4822          This means that there will be no way to get to it by a regular
4823          fence movement.  Never mind, because we pick up such pieces for
4824          rescheduling anyway, so any positive value will do for now.  */
4825       if (seqno < 0)
4826         {
4827           gcc_assert (pipelining_p);
4828           seqno = 1;
4829         }
4830     }
4831 
4832   gcc_assert (seqno > 0);
4833   return seqno;
4834 }
4835 
4836 /* Insert a bookkeeping copy of C_EXPR's insn after PLACE_TO_INSERT, assigning
4837    NEW_SEQNO to it.  Return created insn.  */
4838 static insn_t
4839 emit_bookkeeping_insn (insn_t place_to_insert, expr_t c_expr, int new_seqno)
4840 {
4841   rtx new_insn_rtx = create_copy_of_insn_rtx (EXPR_INSN_RTX (c_expr));
4842 
4843   vinsn_t new_vinsn
4844     = create_vinsn_from_insn_rtx (new_insn_rtx,
4845 				  VINSN_UNIQUE_P (EXPR_VINSN (c_expr)));
4846 
4847   insn_t new_insn = emit_insn_from_expr_after (c_expr, new_vinsn, new_seqno,
4848 					       place_to_insert);
4849 
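  /* The new copy starts out unscheduled; record its uid in CURRENT_COPIES
     so that move_op can tell bookkeeping copies from original insns.  */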
4850   INSN_SCHED_TIMES (new_insn) = 0;
4851   bitmap_set_bit (current_copies, INSN_UID (new_insn));
4852 
4853   return new_insn;
4854 }
4855 
4856 /* Generate a bookkeeping copy of C_EXPR's insn for path(s) incoming into to
4857    E2->dest, except from E1->src (there may be a sequence of empty blocks
4858    between E1->src and E2->dest).  Return block containing the copy.
4859    All scheduler data is initialized for the newly created insn.  */
4860 static basic_block
4861 generate_bookkeeping_insn (expr_t c_expr, edge e1, edge e2)
4862 {
4863   insn_t join_point, place_to_insert, new_insn;
4864   int new_seqno;
4865   bool need_to_exchange_data_sets;
4866   fence_t fence_to_rewind;
4867 
4868   if (sched_verbose >= 4)
4869     sel_print ("Generating bookkeeping insn (%d->%d)\n", e1->src->index,
4870 	       e2->dest->index);
4871 
4872   join_point = sel_bb_head (e2->dest);
4873   place_to_insert = find_place_for_bookkeeping (e1, e2, &fence_to_rewind);
4874   new_seqno = find_seqno_for_bookkeeping (place_to_insert, join_point);
4875   need_to_exchange_data_sets
4876     = sel_bb_empty_p (BLOCK_FOR_INSN (place_to_insert));
4877 
4878   new_insn = emit_bookkeeping_insn (place_to_insert, c_expr, new_seqno);
4879 
4880   if (fence_to_rewind)
4881     FENCE_INSN (fence_to_rewind) = new_insn;
4882 
4883   /* When inserting a bookkeeping insn in a new block, the av sets should be
4884      as follows: the data sets of the old basic block (that now holds the
4885      bookkeeping) are the same as they were before the generation of the
4886      bookkeeping, and the data sets of the new basic block (that now holds
4887      all the other insns of the old basic block) are invalid.  So exchange
4888      the data sets of these basic blocks, as sel_split_block mistakenly
4889      exchanges them in this case.  This cannot be done earlier, because the
4890      new basic block must hold a NULL lv_set while a single insn is added to it.  */
4891   if (need_to_exchange_data_sets)
4892     exchange_data_sets (BLOCK_FOR_INSN (new_insn),
4893 			BLOCK_FOR_INSN (join_point));
4894 
4895   stat_bookkeeping_copies++;
4896   return BLOCK_FOR_INSN (new_insn);
4897 }
4898 
4899 /* Remove from AV_PTR all insns that may need bookkeeping when scheduling
4900    on FENCE, but we are unable to copy them.  */
4901 static void
4902 remove_insns_that_need_bookkeeping (fence_t fence, av_set_t *av_ptr)
4903 {
4904   expr_t expr;
4905   av_set_iterator i;
4906 
4907   /* An expression does not need bookkeeping if it is available on all paths
4908      from the current block to the original block, and the current block
4909      dominates the original block.  We check availability on all paths by
4910      examining EXPR_SPEC; this test is conservative, because EXPR_SPEC may be
4911      positive even if the expr is available on all paths (but if the expr is
4912      unavailable on some path, EXPR_SPEC is guaranteed to be positive).  */
4913 
4914   FOR_EACH_EXPR_1 (expr, i, av_ptr)
4915     {
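      /* Jumps are never removed here; a non-jump expr is removed when we
	 cannot create copies of it (bookkeeping is disabled or its vinsn
	 is unique) and it may still need bookkeeping by the test above.  */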
4916       if (!control_flow_insn_p (EXPR_INSN_RTX (expr))
4917 	  && (!bookkeeping_p || VINSN_UNIQUE_P (EXPR_VINSN (expr)))
4918 	  && (EXPR_SPEC (expr)
4919 	      || !EXPR_ORIG_BB_INDEX (expr)
4920 	      || !dominated_by_p (CDI_DOMINATORS,
4921 				  BASIC_BLOCK (EXPR_ORIG_BB_INDEX (expr)),
4922 				  BLOCK_FOR_INSN (FENCE_INSN (fence)))))
4923 	{
4924           if (sched_verbose >= 4)
4925             sel_print ("Expr %d removed because it would need bookkeeping, which "
4926                        "cannot be created\n", INSN_UID (EXPR_INSN_RTX (expr)));
4927 	  av_set_iter_remove (&i);
4928 	}
4929     }
4930 }
4931 
4932 /* Moving a conditional jump through some instructions.
4933 
4934    Consider example:
4935 
4936        ...                     <- current scheduling point
4937        NOTE BASIC BLOCK:       <- bb header
4938        (p8)  add r14=r14+0x9;;
4939        (p8)  mov [r14]=r23
4940        (!p8) jump L1;;
4941        NOTE BASIC BLOCK:
4942        ...
4943 
4944    We can schedule the jump one cycle earlier than the mov, because they
4945    cannot be executed together, as their predicates are mutually exclusive.
4946 
4947    This is done as follows: first, a new fallthrough basic block is created
4948    after the jump (this can always be done, because there must already be a
4949    fallthrough block where control flow goes when the predicate is true,
4950    as in our example; otherwise there would be a dependence between those
4951    instructions and the jump, and we could not schedule the jump right now);
4952    next, all instructions between the jump and the current scheduling point
4953    are moved to this new block.  The result is this:
4954 
4955       NOTE BASIC BLOCK:
4956       (!p8) jump L1           <- current scheduling point
4957       NOTE BASIC BLOCK:       <- bb header
4958       (p8)  add r14=r14+0x9;;
4959       (p8)  mov [r14]=r23
4960       NOTE BASIC BLOCK:
4961       ...
4962 */
4963 static void
4964 move_cond_jump (rtx insn, bnd_t bnd)
4965 {
4966   edge ft_edge;
4967   basic_block block_from, block_next, block_new, block_bnd, bb;
4968   rtx next, prev, link, head;
4969 
4970   block_from = BLOCK_FOR_INSN (insn);
4971   block_bnd = BLOCK_FOR_INSN (BND_TO (bnd));
4972   prev = BND_TO (bnd);
4973 
4974 #ifdef ENABLE_CHECKING
4975   /* Moving of jump should not cross any other jumps or beginnings of new
4976      basic blocks.  The only exception is when we move a jump through
4977      mutually exclusive insns along fallthru edges.  */
4978   if (block_from != block_bnd)
4979     {
4980       bb = block_from;
4981       for (link = PREV_INSN (insn); link != PREV_INSN (prev);
4982            link = PREV_INSN (link))
4983         {
4984           if (INSN_P (link))
4985             gcc_assert (sched_insns_conditions_mutex_p (insn, link));
4986           if (BLOCK_FOR_INSN (link) && BLOCK_FOR_INSN (link) != bb)
4987             {
4988               gcc_assert (single_pred (bb) == BLOCK_FOR_INSN (link));
4989               bb = BLOCK_FOR_INSN (link);
4990             }
4991         }
4992     }
4993 #endif
4994 
4995   /* Jump is moved to the boundary.  */
4996   next = PREV_INSN (insn);
4997   BND_TO (bnd) = insn;
4998 
4999   ft_edge = find_fallthru_edge_from (block_from);
5000   block_next = ft_edge->dest;
5001   /* There must be a fallthrough block (otherwise, where would
5002      control flow go when the jump predicate is false?).  */
5003   gcc_assert (block_next);
5004 
5005   /* Create new empty basic block after source block.  */
5006   block_new = sel_split_edge (ft_edge);
5007   gcc_assert (block_new->next_bb == block_next
5008               && block_from->next_bb == block_new);
5009 
5010   /* Move all instructions except INSN to BLOCK_NEW.  */
5011   bb = block_bnd;
5012   head = BB_HEAD (block_new);
5013   while (bb != block_from->next_bb)
5014     {
5015       rtx from, to;
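      /* In the boundary block, start moving from the old boundary insn;
	 elsewhere, from the block head.  In the jump's block, stop just
	 before the jump; elsewhere, at the block end.  */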
5016       from = bb == block_bnd ? prev : sel_bb_head (bb);
5017       to = bb == block_from ? next : sel_bb_end (bb);
5018 
5019       /* The jump being moved can be the first insn in the block.
5020          In this case we don't have to move anything in this block.  */
5021       if (NEXT_INSN (to) != from)
5022         {
5023           reorder_insns (from, to, head);
5024 
5025           for (link = to; link != head; link = PREV_INSN (link))
5026             EXPR_ORIG_BB_INDEX (INSN_EXPR (link)) = block_new->index;
5027           head = to;
5028         }
5029 
5030       /* Cleanup possibly empty blocks left.  */
5031       block_next = bb->next_bb;
5032       if (bb != block_from)
5033 	tidy_control_flow (bb, false);
5034       bb = block_next;
5035     }
5036 
5037   /* Assert there is no jump to BLOCK_NEW, only fallthrough edge.  */
5038   gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (block_new)));
5039 
5040   gcc_assert (!sel_bb_empty_p (block_from)
5041               && !sel_bb_empty_p (block_new));
5042 
5043   /* Update data sets for BLOCK_NEW to represent that INSN and
5044      the instructions from the other branch of INSN are no longer
5045      available at BLOCK_NEW.  */
5046   BB_AV_LEVEL (block_new) = global_level;
5047   gcc_assert (BB_LV_SET (block_new) == NULL);
5048   BB_LV_SET (block_new) = get_clear_regset_from_pool ();
5049   update_data_sets (sel_bb_head (block_new));
5050 
5051   /* INSN is a new basic block header - so prepare its data
5052      structures and update availability and liveness sets.  */
5053   update_data_sets (insn);
5054 
5055   if (sched_verbose >= 4)
5056     sel_print ("Moving jump %d\n", INSN_UID (insn));
5057 }
5058 
5059 /* Remove nops generated during move_op for preventing removal of empty
5060    basic blocks.  */
5061 static void
5062 remove_temp_moveop_nops (bool full_tidying)
5063 {
5064   int i;
5065   insn_t insn;
5066 
5067   FOR_EACH_VEC_ELT (insn_t, vec_temp_moveop_nops, i, insn)
5068     {
5069       gcc_assert (INSN_NOP_P (insn));
5070       return_nop_to_pool (insn, full_tidying);
5071     }
5072 
5073   /* Empty the vector.  */
5074   if (VEC_length (insn_t, vec_temp_moveop_nops) > 0)
5075     VEC_block_remove (insn_t, vec_temp_moveop_nops, 0,
5076 		      VEC_length (insn_t, vec_temp_moveop_nops));
5077 }
5078 
5079 /* Records the maximal UID before moving up an instruction.  Used for
5080    distinguishing between bookkeeping copies and original insns.  */
5081 static int max_uid_before_move_op = 0;
5082 
5083 /* Remove from AV_VLIW_P all instructions but the next one when the debug
5084    counter tells us so.  The next instruction is fetched from BNDS.  */
5085 static void
5086 remove_insns_for_debug (blist_t bnds, av_set_t *av_vliw_p)
5087 {
5088   if (! dbg_cnt (sel_sched_insn_cnt))
5089     /* Leave only the next insn in av_vliw.  */
5090     {
5091       av_set_iterator av_it;
5092       expr_t expr;
5093       bnd_t bnd = BLIST_BND (bnds);
5094       insn_t next = BND_TO (bnd);
5095 
5096       gcc_assert (BLIST_NEXT (bnds) == NULL);
5097 
5098       FOR_EACH_EXPR_1 (expr, av_it, av_vliw_p)
5099         if (EXPR_INSN_RTX (expr) != next)
5100           av_set_iter_remove (&av_it);
5101     }
5102 }
5103 
5104 /* Compute available instructions on BNDS.  FENCE is the current fence.  Write
5105    the computed set to *AV_VLIW_P.  */
5106 static void
5107 compute_av_set_on_boundaries (fence_t fence, blist_t bnds, av_set_t *av_vliw_p)
5108 {
5109   if (sched_verbose >= 2)
5110     {
5111       sel_print ("Boundaries: ");
5112       dump_blist (bnds);
5113       sel_print ("\n");
5114     }
5115 
5116   for (; bnds; bnds = BLIST_NEXT (bnds))
5117     {
5118       bnd_t bnd = BLIST_BND (bnds);
5119       av_set_t av1_copy;
5120       insn_t bnd_to = BND_TO (bnd);
5121 
5122       /* Rewind BND->TO to the basic block header in case some bookkeeping
5123          instructions were inserted before BND->TO and it needs to be
5124          adjusted.  */
5125       if (sel_bb_head_p (bnd_to))
5126         gcc_assert (INSN_SCHED_TIMES (bnd_to) == 0);
5127       else
5128         while (INSN_SCHED_TIMES (PREV_INSN (bnd_to)) == 0)
5129           {
5130             bnd_to = PREV_INSN (bnd_to);
5131             if (sel_bb_head_p (bnd_to))
5132               break;
5133           }
5134 
5135       if (BND_TO (bnd) != bnd_to)
5136 	{
5137 	  gcc_assert (FENCE_INSN (fence) == BND_TO (bnd));
5138 	  FENCE_INSN (fence) = bnd_to;
5139 	  BND_TO (bnd) = bnd_to;
5140 	}
5141 
5142       av_set_clear (&BND_AV (bnd));
5143       BND_AV (bnd) = compute_av_set (BND_TO (bnd), NULL, 0, true);
5144 
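      /* BND_AV is the av set computed right at the boundary, while BND_AV1
	 is its copy moved up through the current insn group; the latter is
	 what gets merged into AV_VLIW below.  */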
5145       av_set_clear (&BND_AV1 (bnd));
5146       BND_AV1 (bnd) = av_set_copy (BND_AV (bnd));
5147 
5148       moveup_set_inside_insn_group (&BND_AV1 (bnd), NULL);
5149 
5150       av1_copy = av_set_copy (BND_AV1 (bnd));
5151       av_set_union_and_clear (av_vliw_p, &av1_copy, NULL);
5152     }
5153 
5154   if (sched_verbose >= 2)
5155     {
5156       sel_print ("Available exprs (vliw form): ");
5157       dump_av_set (*av_vliw_p);
5158       sel_print ("\n");
5159     }
5160 }
5161 
5162 /* Calculate the sequential av set on BND corresponding to the EXPR_VLIW
5163    expression.  When FOR_MOVEOP is true, also replace the register of
5164    expressions found with the register from EXPR_VLIW.  */
5165 static av_set_t
5166 find_sequential_best_exprs (bnd_t bnd, expr_t expr_vliw, bool for_moveop)
5167 {
5168   av_set_t expr_seq = NULL;
5169   expr_t expr;
5170   av_set_iterator i;
5171 
5172   FOR_EACH_EXPR (expr, i, BND_AV (bnd))
5173     {
5174       if (equal_after_moveup_path_p (expr, NULL, expr_vliw))
5175         {
5176           if (for_moveop)
5177             {
5178               /* The sequential expression has the right form to pass
5179                  to move_op except when renaming happened.  Put the
5180                  correct register in EXPR then.  */
5181               if (EXPR_SEPARABLE_P (expr) && REG_P (EXPR_LHS (expr)))
5182 		{
5183                   if (expr_dest_regno (expr) != expr_dest_regno (expr_vliw))
5184 		    {
5185 		      replace_dest_with_reg_in_expr (expr, EXPR_LHS (expr_vliw));
5186 		      stat_renamed_scheduled++;
5187 		    }
5188 		  /* Also put the correct TARGET_AVAILABLE bit on the expr.
5189                      This is needed when renaming came up with original
5190                      register.  */
5191                   else if (EXPR_TARGET_AVAILABLE (expr)
5192                            != EXPR_TARGET_AVAILABLE (expr_vliw))
5193 		    {
5194 		      gcc_assert (EXPR_TARGET_AVAILABLE (expr_vliw) == 1);
5195 		      EXPR_TARGET_AVAILABLE (expr) = 1;
5196 		    }
5197 		}
5198               if (EXPR_WAS_SUBSTITUTED (expr))
5199                 stat_substitutions_total++;
5200             }
5201 
5202           av_set_add (&expr_seq, expr);
5203 
5204           /* With substitution inside insn group, it is possible
5205              that more than one expression in expr_seq will correspond
5206              to expr_vliw.  In this case, choose one, as attempting to
5207              move both leads to miscompiles.  */
5208           break;
5209         }
5210     }
5211 
5212   if (for_moveop && sched_verbose >= 2)
5213     {
5214       sel_print ("Best expression(s) (sequential form): ");
5215       dump_av_set (expr_seq);
5216       sel_print ("\n");
5217     }
5218 
5219   return expr_seq;
5220 }
5221 
5222 
5223 /* Move nop to previous block.  */
5224 static void ATTRIBUTE_UNUSED
5225 move_nop_to_previous_block (insn_t nop, basic_block prev_bb)
5226 {
5227   insn_t prev_insn, next_insn, note;
5228 
5229   gcc_assert (sel_bb_head_p (nop)
5230               && prev_bb == BLOCK_FOR_INSN (nop)->prev_bb);
5231   note = bb_note (BLOCK_FOR_INSN (nop));
5232   prev_insn = sel_bb_end (prev_bb);
5233   next_insn = NEXT_INSN (nop);
5234   gcc_assert (prev_insn != NULL_RTX
5235               && PREV_INSN (note) == prev_insn);
5236 
5237   NEXT_INSN (prev_insn) = nop;
5238   PREV_INSN (nop) = prev_insn;
5239 
5240   PREV_INSN (note) = nop;
5241   NEXT_INSN (note) = next_insn;
5242 
5243   NEXT_INSN (nop) = note;
5244   PREV_INSN (next_insn) = note;
5245 
5246   BB_END (prev_bb) = nop;
5247   BLOCK_FOR_INSN (nop) = prev_bb;
5248 }
5249 
5250 /* Prepare a place to insert the chosen expression on BND.  */
5251 static insn_t
5252 prepare_place_to_insert (bnd_t bnd)
5253 {
5254   insn_t place_to_insert;
5255 
5256   /* Init place_to_insert before calling move_op, as the latter
5257      can possibly remove BND_TO (bnd).  */
5258   if (/* If this is not the first insn scheduled.  */
5259       BND_PTR (bnd))
5260     {
5261       /* Add it after last scheduled.  */
5262       place_to_insert = ILIST_INSN (BND_PTR (bnd));
5263       if (DEBUG_INSN_P (place_to_insert))
5264 	{
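	  /* The last scheduled insn is a debug insn: check whether any
	     non-debug insn was scheduled at all.  If not, fall back to
	     inserting before BND_TO below.  */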
5265 	  ilist_t l = BND_PTR (bnd);
5266 	  while ((l = ILIST_NEXT (l))
5267 		 && DEBUG_INSN_P (ILIST_INSN (l)))
5268 	    ;
5269 	  if (!l)
5270 	    place_to_insert = NULL;
5271 	}
5272     }
5273   else
5274     place_to_insert = NULL;
5275 
5276   if (!place_to_insert)
5277     {
5278       /* Add it before BND_TO.  The difference is in the
5279          basic block, where INSN will be added.  */
5280       place_to_insert = get_nop_from_pool (BND_TO (bnd));
5281       gcc_assert (BLOCK_FOR_INSN (place_to_insert)
5282                   == BLOCK_FOR_INSN (BND_TO (bnd)));
5283     }
5284 
5285   return place_to_insert;
5286 }
5287 
5288 /* Find the original instructions for EXPR_SEQ and move them to the BND
5289    boundary.  Return the expression to emit in C_EXPR.  */
5290 static bool
5291 move_exprs_to_boundary (bnd_t bnd, expr_t expr_vliw,
5292                         av_set_t expr_seq, expr_t c_expr)
5293 {
5294   bool b, should_move;
5295   unsigned book_uid;
5296   bitmap_iterator bi;
5297   int n_bookkeeping_copies_before_moveop;
5298 
5299   /* Make a move.  This call will remove the original operation,
5300      insert all necessary bookkeeping instructions and update the
5301      data sets.  After that all we have to do is add the operation
5302      just before BND_TO (BND).  */
5303   n_bookkeeping_copies_before_moveop = stat_bookkeeping_copies;
5304   max_uid_before_move_op = get_max_uid ();
5305   bitmap_clear (current_copies);
5306   bitmap_clear (current_originators);
5307 
5308   b = move_op (BND_TO (bnd), expr_seq, expr_vliw,
5309                get_dest_from_orig_ops (expr_seq), c_expr, &should_move);
5310 
5311   /* We should be able to find the expression we've chosen for
5312      scheduling.  */
5313   gcc_assert (b);
5314 
5315   if (stat_bookkeeping_copies > n_bookkeeping_copies_before_moveop)
5316     stat_insns_needed_bookkeeping++;
5317 
5318   EXECUTE_IF_SET_IN_BITMAP (current_copies, 0, book_uid, bi)
5319     {
5320       unsigned uid;
5321       bitmap_iterator bi;
5322 
5323       /* We allocate these bitmaps lazily.  */
5324       if (! INSN_ORIGINATORS_BY_UID (book_uid))
5325         INSN_ORIGINATORS_BY_UID (book_uid) = BITMAP_ALLOC (NULL);
5326 
5327       bitmap_copy (INSN_ORIGINATORS_BY_UID (book_uid),
5328                    current_originators);
5329 
5330       /* Transitively add all originators' originators.  */
5331       EXECUTE_IF_SET_IN_BITMAP (current_originators, 0, uid, bi)
5332        if (INSN_ORIGINATORS_BY_UID (uid))
5333 	 bitmap_ior_into (INSN_ORIGINATORS_BY_UID (book_uid),
5334 			  INSN_ORIGINATORS_BY_UID (uid));
5335     }
5336 
5337   return should_move;
5338 }
5339 
5340 
5341 /* Debug a DFA state as an array of bytes.  */
5342 static void
5343 debug_state (state_t state)
5344 {
5345   unsigned char *p;
5346   unsigned int i, size = dfa_state_size;
5347 
5348   sel_print ("state (%u):", size);
5349   for (i = 0, p = (unsigned char *) state; i < size; i++)
5350     sel_print (" %d", p[i]);
5351   sel_print ("\n");
5352 }
5353 
5354 /* Advance state on FENCE with INSN.  Return true if INSN is
5355    an ASM, and we should advance state once more.  */
5356 static bool
5357 advance_state_on_fence (fence_t fence, insn_t insn)
5358 {
5359   bool asm_p;
5360 
5361   if (recog_memoized (insn) >= 0)
5362     {
5363       int res;
5364       state_t temp_state = alloca (dfa_state_size);
5365 
5366       gcc_assert (!INSN_ASM_P (insn));
5367       asm_p = false;
5368 
5369       memcpy (temp_state, FENCE_STATE (fence), dfa_state_size);
5370       res = state_transition (FENCE_STATE (fence), insn);
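      /* state_transition returns a negative value when INSN is issued in
	 the given state; an insn chosen for scheduling must always be
	 issuable here.  */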
5371       gcc_assert (res < 0);
5372 
5373       if (memcmp (temp_state, FENCE_STATE (fence), dfa_state_size))
5374         {
5375           FENCE_ISSUED_INSNS (fence)++;
5376 
5377           /* We should never issue more than issue_rate insns.  */
5378           if (FENCE_ISSUED_INSNS (fence) > issue_rate)
5379             gcc_unreachable ();
5380         }
5381     }
5382   else
5383     {
5384       /* This could be an ASM insn which we'd like to schedule
5385          on the next cycle.  */
5386       asm_p = INSN_ASM_P (insn);
5387       if (!FENCE_STARTS_CYCLE_P (fence) && asm_p)
5388         advance_one_cycle (fence);
5389     }
5390 
5391   if (sched_verbose >= 2)
5392     debug_state (FENCE_STATE (fence));
5393   if (!DEBUG_INSN_P (insn))
5394     FENCE_STARTS_CYCLE_P (fence) = 0;
5395   FENCE_ISSUE_MORE (fence) = can_issue_more;
5396   return asm_p;
5397 }
5398 
5399 /* Update FENCE on which INSN was scheduled and this INSN, too.  NEED_STALL
5400    is nonzero if we need to stall after issuing INSN.  */
5401 static void
5402 update_fence_and_insn (fence_t fence, insn_t insn, int need_stall)
5403 {
5404   bool asm_p;
5405 
5406   /* First, reflect that something is scheduled on this fence.  */
5407   asm_p = advance_state_on_fence (fence, insn);
5408   FENCE_LAST_SCHEDULED_INSN (fence) = insn;
5409   VEC_safe_push (rtx, gc, FENCE_EXECUTING_INSNS (fence), insn);
5410   if (SCHED_GROUP_P (insn))
5411     {
5412       FENCE_SCHED_NEXT (fence) = INSN_SCHED_NEXT (insn);
5413       SCHED_GROUP_P (insn) = 0;
5414     }
5415   else
5416     FENCE_SCHED_NEXT (fence) = NULL_RTX;
5417   if (INSN_UID (insn) < FENCE_READY_TICKS_SIZE (fence))
5418     FENCE_READY_TICKS (fence) [INSN_UID (insn)] = 0;
5419 
5420   /* Set instruction scheduling info.  This will be used in bundling,
5421      pipelining, tick computations etc.  */
5422   ++INSN_SCHED_TIMES (insn);
5423   EXPR_TARGET_AVAILABLE (INSN_EXPR (insn)) = true;
5424   EXPR_ORIG_SCHED_CYCLE (INSN_EXPR (insn)) = FENCE_CYCLE (fence);
5425   INSN_AFTER_STALL_P (insn) = FENCE_AFTER_STALL_P (fence);
5426   INSN_SCHED_CYCLE (insn) = FENCE_CYCLE (fence);
5427 
5428   /* This does not account for adjust_cost hooks; just add the biggest
5429      constant the hook may add to the latency.  TODO: make this
5430      a target dependent constant.  */
5431   INSN_READY_CYCLE (insn)
5432     = INSN_SCHED_CYCLE (insn) + (INSN_CODE (insn) < 0
5433                                  ? 1
5434                                  : maximal_insn_latency (insn) + 1);
5435 
5436   /* Change these fields last, as they're used above.  */
5437   FENCE_AFTER_STALL_P (fence) = 0;
5438   if (asm_p || need_stall)
5439     advance_one_cycle (fence);
5440 
5441   /* Indicate that we've scheduled something on this fence.  */
5442   FENCE_SCHEDULED_P (fence) = true;
5443   scheduled_something_on_previous_fence = true;
5444 
5445   /* Print debug information when insn's fields are updated.  */
5446   if (sched_verbose >= 2)
5447     {
5448       sel_print ("Scheduling insn: ");
5449       dump_insn_1 (insn, 1);
5450       sel_print ("\n");
5451     }
5452 }
5453 
5454 /* Update boundary BND (and, if needed, FENCE) with INSN, remove the
5455    old boundary from BNDSP, add new boundaries to BNDS_TAIL_P and
5456    return it.  */
5457 static blist_t *
5458 update_boundaries (fence_t fence, bnd_t bnd, insn_t insn, blist_t *bndsp,
5459                    blist_t *bnds_tailp)
5460 {
5461   succ_iterator si;
5462   insn_t succ;
5463 
5464   advance_deps_context (BND_DC (bnd), insn);
5465   FOR_EACH_SUCC_1 (succ, si, insn,
5466                    SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
5467     {
5468       ilist_t ptr = ilist_copy (BND_PTR (bnd));
5469 
5470       ilist_add (&ptr, insn);
5471 
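      /* A debug insn at the end of its block must not spawn a boundary at
	 an ineligible successor; simply drop such a path.  */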
5472       if (DEBUG_INSN_P (insn) && sel_bb_end_p (insn)
5473 	  && is_ineligible_successor (succ, ptr))
5474 	{
5475 	  ilist_clear (&ptr);
5476 	  continue;
5477 	}
5478 
5479       if (FENCE_INSN (fence) == insn && !sel_bb_end_p (insn))
5480 	{
5481 	  if (sched_verbose >= 9)
5482 	    sel_print ("Updating fence insn from %i to %i\n",
5483 		       INSN_UID (insn), INSN_UID (succ));
5484 	  FENCE_INSN (fence) = succ;
5485 	}
5486       blist_add (bnds_tailp, succ, ptr, BND_DC (bnd));
5487       bnds_tailp = &BLIST_NEXT (*bnds_tailp);
5488     }
5489 
5490   blist_remove (bndsp);
5491   return bnds_tailp;
5492 }
5493 
5494 /* Schedule EXPR_VLIW on BND.  Return the insn emitted.  */
5495 static insn_t
5496 schedule_expr_on_boundary (bnd_t bnd, expr_t expr_vliw, int seqno)
5497 {
5498   av_set_t expr_seq;
5499   expr_t c_expr = XALLOCA (expr_def);
5500   insn_t place_to_insert;
5501   insn_t insn;
5502   bool should_move;
5503 
5504   expr_seq = find_sequential_best_exprs (bnd, expr_vliw, true);
5505 
5506   /* In case of scheduling a jump skipping some other instructions,
5507      prepare the CFG.  After this, the jump is at the boundary and can
5508      be scheduled like a usual insn by MOVE_OP.  */
5509   if (vinsn_cond_branch_p (EXPR_VINSN (expr_vliw)))
5510     {
5511       insn = EXPR_INSN_RTX (expr_vliw);
5512 
5513       /* Speculative jumps are not handled.  */
5514       if (insn != BND_TO (bnd)
5515           && !sel_insn_is_speculation_check (insn))
5516         move_cond_jump (insn, bnd);
5517     }
5518 
5519   /* Find a place for C_EXPR to schedule.  */
5520   place_to_insert = prepare_place_to_insert (bnd);
5521   should_move = move_exprs_to_boundary (bnd, expr_vliw, expr_seq, c_expr);
5522   clear_expr (c_expr);
5523 
5524   /* Add the instruction.  The corner case to care about is when
5525      the expr_seq set has more than one expr, and we chose the one that
5526      is not equal to expr_vliw.  Then expr_vliw may be an insn already in
5527      the stream, and we can't use it.  Generate a new vinsn instead.  */
5528   if (INSN_IN_STREAM_P (EXPR_INSN_RTX (expr_vliw)))
5529     {
5530       vinsn_t vinsn_new;
5531 
5532       vinsn_new = vinsn_copy (EXPR_VINSN (expr_vliw), false);
5533       change_vinsn_in_expr (expr_vliw, vinsn_new);
5534       should_move = false;
5535     }
5536   if (should_move)
5537     insn = sel_move_insn (expr_vliw, seqno, place_to_insert);
5538   else
5539     insn = emit_insn_from_expr_after (expr_vliw, NULL, seqno,
5540                                       place_to_insert);
5541 
5542   /* Return the nops generated for preserving the data sets back
5543      into the pool.  */
5544   if (INSN_NOP_P (place_to_insert))
5545     return_nop_to_pool (place_to_insert, !DEBUG_INSN_P (insn));
5546   remove_temp_moveop_nops (!DEBUG_INSN_P (insn));
5547 
5548   av_set_clear (&expr_seq);
5549 
5550   /* Save the scheduled expression so as to reset its target availability
5551      if we meet it later on the same fence.  */
5552   if (EXPR_WAS_RENAMED (expr_vliw))
5553     vinsn_vec_add (&vec_target_unavailable_vinsns, INSN_EXPR (insn));
5554 
5555   /* Check that the recent movement didn't destroy the loop
5556      structure.  */
5557   gcc_assert (!pipelining_p
5558               || current_loop_nest == NULL
5559               || loop_latch_edge (current_loop_nest));
5560   return insn;
5561 }
5562 
5563 /* Stall for N cycles on FENCE.  */
5564 static void
5565 stall_for_cycles (fence_t fence, int n)
5566 {
5567   int could_more;
5568 
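  /* Remember to mark the fence if we stall for more than one cycle or
     while the current cycle could still issue more insns; insns issued
     right after such a stall are flagged via INSN_AFTER_STALL_P.  */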
5569   could_more = n > 1 || FENCE_ISSUED_INSNS (fence) < issue_rate;
5570   while (n--)
5571     advance_one_cycle (fence);
5572   if (could_more)
5573     FENCE_AFTER_STALL_P (fence) = 1;
5574 }
5575 
5576 /* Gather a parallel group of insns at FENCE and assign their seqno
5577    to SEQNO.  All scheduled insns are gathered in SCHEDULED_INSNS_TAILPP
5578    list for later recalculation of seqnos.  */
5579 static void
5580 fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp)
5581 {
5582   blist_t bnds = NULL, *bnds_tailp;
5583   av_set_t av_vliw = NULL;
5584   insn_t insn = FENCE_INSN (fence);
5585 
5586   if (sched_verbose >= 2)
5587     sel_print ("Starting fill_insns for insn %d, cycle %d\n",
5588                INSN_UID (insn), FENCE_CYCLE (fence));
5589 
5590   blist_add (&bnds, insn, NULL, FENCE_DC (fence));
5591   bnds_tailp = &BLIST_NEXT (bnds);
5592   set_target_context (FENCE_TC (fence));
5593   can_issue_more = FENCE_ISSUE_MORE (fence);
5594   target_bb = INSN_BB (insn);
5595 
5596   /* Do while we can add any operation to the current group.  */
5597   do
5598     {
5599       blist_t *bnds_tailp1, *bndsp;
5600       expr_t expr_vliw;
5601       int need_stall = false;
5602       int was_stall = 0, scheduled_insns = 0;
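      /* When pipelining, cut the scheduling group short (one full issue
	 group or a single stall at most), so that av sets are recomputed
	 sooner and insns from the next loop iteration can be brought in
	 (see the stall handling below).  */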
5603       int max_insns = pipelining_p ? issue_rate : 2 * issue_rate;
5604       int max_stall = pipelining_p ? 1 : 3;
5605       bool last_insn_was_debug = false;
5606       bool was_debug_bb_end_p = false;
5607 
5608       compute_av_set_on_boundaries (fence, bnds, &av_vliw);
5609       remove_insns_that_need_bookkeeping (fence, &av_vliw);
5610       remove_insns_for_debug (bnds, &av_vliw);
5611 
5612       /* Return early if we have nothing to schedule.  */
5613       if (av_vliw == NULL)
5614         break;
5615 
5616       /* Choose the best expression and, if needed, destination register
5617 	 for it.  */
5618       do
5619         {
5620           expr_vliw = find_best_expr (&av_vliw, bnds, fence, &need_stall);
5621           if (! expr_vliw && need_stall)
5622             {
5623               /* All expressions required a stall.  Do not recompute av sets
5624                  as we'll get the same answer (modulo the insns between
5625                  the fence and its boundary, which will not be available for
5626                  pipelining).
5627 		 If we are going to stall for too long, break to recompute av
5628 		 sets and bring more insns for pipelining.  */
5629               was_stall++;
5630 	      if (need_stall <= 3)
5631 		stall_for_cycles (fence, need_stall);
5632 	      else
5633 		{
5634 		  stall_for_cycles (fence, 1);
5635 		  break;
5636 		}
5637             }
5638         }
5639       while (! expr_vliw && need_stall);
5640 
5641       /* Now either we've selected expr_vliw or we have nothing to schedule.  */
5642       if (!expr_vliw)
5643         {
5644 	  av_set_clear (&av_vliw);
5645           break;
5646         }
5647 
5648       bndsp = &bnds;
5649       bnds_tailp1 = bnds_tailp;
5650 
5651       do
5652 	/* This code will be executed only once, until we support several
5653            boundaries per fence.  */
5654         {
5655 	  bnd_t bnd = BLIST_BND (*bndsp);
5656 
5657 	  if (!av_set_is_in_p (BND_AV1 (bnd), EXPR_VINSN (expr_vliw)))
5658 	    {
5659 	      bndsp = &BLIST_NEXT (*bndsp);
5660 	      continue;
5661 	    }
5662 
5663           insn = schedule_expr_on_boundary (bnd, expr_vliw, seqno);
5664 	  last_insn_was_debug = DEBUG_INSN_P (insn);
5665 	  if (last_insn_was_debug)
5666 	    was_debug_bb_end_p = (insn == BND_TO (bnd) && sel_bb_end_p (insn));
5667           update_fence_and_insn (fence, insn, need_stall);
5668           bnds_tailp = update_boundaries (fence, bnd, insn, bndsp, bnds_tailp);
5669 
5670 	  /* Add insn to the list of scheduled on this cycle instructions.  */
5671 	  ilist_add (*scheduled_insns_tailpp, insn);
5672 	  *scheduled_insns_tailpp = &ILIST_NEXT (**scheduled_insns_tailpp);
5673         }
5674       while (*bndsp != *bnds_tailp1);
5675 
5676       av_set_clear (&av_vliw);
5677       if (!last_insn_was_debug)
5678 	scheduled_insns++;
5679 
5680       /* We currently support information about candidate blocks only for
5681	 one 'target_bb' block.  Hence we can't schedule after a jump insn,
5682	 as this would bring two boundaries and, hence, the necessity to
5683	 handle information for two or more blocks concurrently.  */
5684       if ((last_insn_was_debug ? was_debug_bb_end_p : sel_bb_end_p (insn))
5685           || (was_stall
5686               && (was_stall >= max_stall
5687                   || scheduled_insns >= max_insns)))
5688         break;
5689     }
5690   while (bnds);
5691 
5692   gcc_assert (!FENCE_BNDS (fence));
5693 
5694   /* Update boundaries of the FENCE.  */
5695   while (bnds)
5696     {
5697       ilist_t ptr = BND_PTR (BLIST_BND (bnds));
5698 
5699       if (ptr)
5700 	{
5701 	  insn = ILIST_INSN (ptr);
5702 
5703 	  if (!ilist_is_in_p (FENCE_BNDS (fence), insn))
5704 	    ilist_add (&FENCE_BNDS (fence), insn);
5705 	}
5706 
5707       blist_remove (&bnds);
5708     }
5709 
5710   /* Update target context on the fence.  */
5711   reset_target_context (FENCE_TC (fence), false);
5712 }
5713 
5714 /* All exprs in ORIG_OPS must have the same destination register or memory.
5715    Return that destination.  */
5716 static rtx
5717 get_dest_from_orig_ops (av_set_t orig_ops)
5718 {
5719   rtx dest = NULL_RTX;
5720   av_set_iterator av_it;
5721   expr_t expr;
5722   bool first_p = true;
5723 
5724   FOR_EACH_EXPR (expr, av_it, orig_ops)
5725     {
5726       rtx x = EXPR_LHS (expr);
5727 
5728       if (first_p)
5729 	{
5730 	  first_p = false;
5731 	  dest = x;
5732 	}
5733       else
5734 	gcc_assert (dest == x
5735 		    || (dest != NULL_RTX && x != NULL_RTX
5736 			&& rtx_equal_p (dest, x)));
5737     }
5738 
5739   return dest;
5740 }
5741 
5742 /* Update data sets for the bookkeeping block and record those expressions
5743    which become unavailable after inserting this bookkeeping.  */
5744 static void
5745 update_and_record_unavailable_insns (basic_block book_block)
5746 {
5747   av_set_iterator i;
5748   av_set_t old_av_set = NULL;
5749   expr_t cur_expr;
5750   rtx bb_end = sel_bb_end (book_block);
5751 
5752   /* First, get correct liveness in the bookkeeping block.  The problem is
5753      the range between the bookkeeping insn and the end of the block.  */
5754   update_liveness_on_insn (bb_end);
5755   if (control_flow_insn_p (bb_end))
5756     update_liveness_on_insn (PREV_INSN (bb_end));
5757 
5758   /* If there's valid av_set on BOOK_BLOCK, then there might exist another
5759      fence above, where we may choose to schedule an insn which is
5760      actually blocked from moving up with the bookkeeping we create here.  */
5761 	  while (insn != place_to_insert
5762 		 && (DEBUG_INSN_P (insn) || NOTE_P (insn)))
5763       old_av_set = av_set_copy (BB_AV_SET (book_block));
5764       update_data_sets (sel_bb_head (book_block));
5765 
5766       /* Traverse all the expressions in the old av_set and check whether
5767 	 CUR_EXPR is in new AV_SET.  */
5768       FOR_EACH_EXPR (cur_expr, i, old_av_set)
5769         {
5770           expr_t new_expr = av_set_lookup (BB_AV_SET (book_block),
5771 					   EXPR_VINSN (cur_expr));
5772 
5773           if (! new_expr
5774               /* In this case, we can just turn off the E_T_A bit, but we can't
5775                  represent this information with the current vector.  */
5776               || EXPR_TARGET_AVAILABLE (new_expr)
5777 		 != EXPR_TARGET_AVAILABLE (cur_expr))
5778	    /* Unfortunately, the code below could also be triggered on
5779 	       separable insns, e.g. when moving insns through the new
5780 	       speculation check as in PR 53701.  */
5781             vinsn_vec_add (&vec_bookkeeping_blocked_vinsns, cur_expr);
5782         }
5783 
5784       av_set_clear (&old_av_set);
5785     }
5786 }
5787 
5788 /* The main effect of this function is that sparams->c_expr is merged
5789    with (or copied to) lparams->c_expr_merged.  If there's only one successor,
5790    we avoid merging anything by copying sparams->c_expr to lparams->c_expr_merged.
5791    lparams->c_expr_merged is copied back to sparams->c_expr after all
5792    successors have been traversed.  lparams->c_expr_local is an expr allocated
5793    on stack in the caller function, and is used if there is more than one
5794    successor.
5795 
5796    SUCC is one of the SUCCS_NORMAL successors of INSN,
5797    MOVEOP_DRV_CALL_RES is the result of calling code_motion_path_driver on SUCC,
5798    LPARAMS and STATIC_PARAMS contain the parameters described above.  */
5799 static void
5800 move_op_merge_succs (insn_t insn ATTRIBUTE_UNUSED,
5801                      insn_t succ ATTRIBUTE_UNUSED,
5802 		     int moveop_drv_call_res,
5803 		     cmpd_local_params_p lparams, void *static_params)
5804 {
5805   moveop_static_params_p sparams = (moveop_static_params_p) static_params;
5806 
5807   /* Nothing to do if the original expr wasn't found below.  */
5808   if (moveop_drv_call_res != 1)
5809     return;
5810 
5811   /* If this is the first successor.  */
5812   if (!lparams->c_expr_merged)
5813     {
5814       lparams->c_expr_merged = sparams->c_expr;
5815       sparams->c_expr = lparams->c_expr_local;
5816     }
5817   else
5818     {
5819       /* We must merge all found expressions to get reasonable
5820 	 EXPR_SPEC_DONE_DS for the resulting insn.  If we don't
5821 	 do so then we can first find the expr with epsilon
5822 	 speculation success probability and only then with the
5823 	 good probability.  As a result the insn will get epsilon
5824 	 probability and will never be scheduled because of
5825 	 weakness_cutoff in find_best_expr.
5826 
5827 	 We call merge_expr_data here instead of merge_expr
5828 	 because due to speculation C_EXPR and X may have the
5829 	 same insns with different speculation types.  And as of
5830 	 now such insns are considered non-equal.
5831 
5832 	 However, EXPR_SCHED_TIMES is different -- we must get
5833 	 SCHED_TIMES from a real insn, not a bookkeeping copy.
5834 	 We force this here.  Instead, we may consider merging
5835 	 SCHED_TIMES to the maximum instead of minimum in the
5836 	 below function.  */
5837       int old_times = EXPR_SCHED_TIMES (lparams->c_expr_merged);
5838 
5839       merge_expr_data (lparams->c_expr_merged, sparams->c_expr, NULL);
5840       if (EXPR_SCHED_TIMES (sparams->c_expr) == 0)
5841 	EXPR_SCHED_TIMES (lparams->c_expr_merged) = old_times;
5842 
5843       clear_expr (sparams->c_expr);
5844     }
5845 }
5846 
5847 /*  Add used regs for the successor SUCC into SPARAMS->USED_REGS.
5848 
5849    SUCC is one of the SUCCS_NORMAL successors of INSN,
5850    MOVEOP_DRV_CALL_RES is the result of calling code_motion_path_driver on SUCC, or 0
5851      if SUCC is one of SUCCS_BACK or SUCCS_OUT.
5852    STATIC_PARAMS contain USED_REGS set.  */
5853 static void
5854 fur_merge_succs (insn_t insn ATTRIBUTE_UNUSED, insn_t succ,
5855 		 int moveop_drv_call_res,
5856 		 cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
5857 		 void *static_params)
5858 {
5859   regset succ_live;
5860   fur_static_params_p sparams = (fur_static_params_p) static_params;
5861 
5862   /* Here we compute live regsets only for branches that do not lie
5863      on the code motion paths.  These branches correspond to value
5864      MOVEOP_DRV_CALL_RES==0 and include SUCCS_BACK and SUCCS_OUT, though
5865      for such branches code_motion_path_driver is not called.  */
5866   if (moveop_drv_call_res != 0)
5867     return;
5868 
5869   /* Mark all registers that do not meet the following condition:
5870      (3) not live on the other path of any conditional branch
5871      that is passed by the operation, in case original
5872      operations are not present on both paths of the
5873      conditional branch.  */
5874   succ_live = compute_live (succ);
5875   IOR_REG_SET (sparams->used_regs, succ_live);
5876 }
5877 
5878 /* This function is called after the last successor.  Copy LP->C_EXPR_MERGED
5879    into SP->C_EXPR.  */
5880 static void
5881 move_op_after_merge_succs (cmpd_local_params_p lp, void *sparams)
5882 {
5883   moveop_static_params_p sp = (moveop_static_params_p) sparams;
5884 
5885   sp->c_expr = lp->c_expr_merged;
5886 }
5887 
5888 /* Track bookkeeping copies created, insns scheduled, and blocks for
5889    rescheduling when INSN is found by move_op.  */
5890 static void
5891 track_scheduled_insns_and_blocks (rtx insn)
5892 {
5893   /* Even if this insn can be a copy that will be removed during current move_op,
5894      we still need to count it as an originator.  */
5895   bitmap_set_bit (current_originators, INSN_UID (insn));
5896 
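  /* bitmap_clear_bit returns true if the bit was set, i.e. if INSN is one
     of the bookkeeping copies created during this move_op; only original
     insns are counted below.  */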
5897   if (!bitmap_clear_bit (current_copies, INSN_UID (insn)))
5898     {
5899       /* Note that original block needs to be rescheduled, as we pulled an
5900 	 instruction out of it.  */
5901       if (INSN_SCHED_TIMES (insn) > 0)
5902 	bitmap_set_bit (blocks_to_reschedule, BLOCK_FOR_INSN (insn)->index);
5903       else if (INSN_UID (insn) < first_emitted_uid && !DEBUG_INSN_P (insn))
5904 	num_insns_scheduled++;
5905     }
5906 
5907   /* For instructions we must immediately remove insn from the
5908      stream, so subsequent update_data_sets () won't include this
5909      insn into av_set.
5910      For expr we must make insn look like "INSN_REG (insn) := c_expr".  */
5911   if (INSN_UID (insn) > max_uid_before_move_op)
5912     stat_bookkeeping_copies--;
5913 }
5914 
5915 /* Emit a register-register copy for INSN if needed.  Return true if
5916    we emitted one.  PARAMS holds the move_op static parameters.  */
5917 static bool
5918 maybe_emit_renaming_copy (rtx insn,
5919                           moveop_static_params_p params)
5920 {
5921   bool insn_emitted  = false;
5922   rtx cur_reg;
5923 
5924   /* Bail out early when the expression cannot be renamed at all.  */
5925   if (!EXPR_SEPARABLE_P (params->c_expr))
5926     return false;
5927 
5928   cur_reg = expr_dest_reg (params->c_expr);
5929   gcc_assert (cur_reg && params->dest && REG_P (params->dest));
5930 
5931   /* If the original operation has an expr and the register chosen for
5932      that expr is not the original operation's dest reg, substitute the
5933      operation's right hand side with the chosen register.  */
5934   if (REGNO (params->dest) != REGNO (cur_reg))
5935     {
5936       insn_t reg_move_insn, reg_move_insn_rtx;
5937 
5938       reg_move_insn_rtx = create_insn_rtx_with_rhs (INSN_VINSN (insn),
5939                                                     params->dest);
5940       reg_move_insn = sel_gen_insn_from_rtx_after (reg_move_insn_rtx,
5941                                                    INSN_EXPR (insn),
5942                                                    INSN_SEQNO (insn),
5943                                                    insn);
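      /* The reg-reg copy itself is not speculative, so clear any
	 speculation status inherited from INSN's expr.  */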
5944       EXPR_SPEC_DONE_DS (INSN_EXPR (reg_move_insn)) = 0;
5945       replace_dest_with_reg_in_expr (params->c_expr, params->dest);
5946 
5947       insn_emitted = true;
5948       params->was_renamed = true;
5949     }
5950 
5951   return insn_emitted;
5952 }
5953 
5954 /* Emit a speculative check for INSN speculated as EXPR if needed.
5955    Return true if we've emitted one.  PARAMS holds the move_op static
5956    parameters.  */
5957 static bool
5958 maybe_emit_speculative_check (rtx insn, expr_t expr,
5959                               moveop_static_params_p params)
5960 {
5961   bool insn_emitted = false;
5962   insn_t x;
5963   ds_t check_ds;
5964 
5965   check_ds = get_spec_check_type_for_insn (insn, expr);
5966   if (check_ds != 0)
5967     {
5968       /* A speculation check should be inserted.  */
5969       x = create_speculation_check (params->c_expr, check_ds, insn);
5970       insn_emitted = true;
5971     }
5972   else
5973     {
5974       EXPR_SPEC_DONE_DS (INSN_EXPR (insn)) = 0;
5975       x = insn;
5976     }
5977 
5978   gcc_assert (EXPR_SPEC_DONE_DS (INSN_EXPR (x)) == 0
5979               && EXPR_SPEC_TO_CHECK_DS (INSN_EXPR (x)) == 0);
5980   return insn_emitted;
5981 }
5982 
5983 /* Handle transformations that leave an insn in place of the original
5984    insn, such as renaming/speculation.  Return true if one of such
5985    transformations actually happened, and we have emitted this insn.  */
5986 static bool
5987 handle_emitting_transformations (rtx insn, expr_t expr,
5988                                  moveop_static_params_p params)
5989 {
5990   bool insn_emitted = false;
5991 
5992   insn_emitted = maybe_emit_renaming_copy (insn, params);
5993   insn_emitted |= maybe_emit_speculative_check (insn, expr, params);
5994 
5995   return insn_emitted;
5996 }
5997 
5998 /* If INSN is the only insn in the basic block (not counting JUMP,
5999    which may be a jump to next insn, and DEBUG_INSNs), we want to
6000    leave a NOP there till the return to fill_insns.  */
6001 
6002 static bool
6003 need_nop_to_preserve_insn_bb (rtx insn)
6004 {
6005   insn_t bb_head, bb_end, bb_next, in_next;
6006   basic_block bb = BLOCK_FOR_INSN (insn);
6007 
6008   bb_head = sel_bb_head (bb);
6009   bb_end = sel_bb_end (bb);
6010 
6011   if (bb_head == bb_end)
6012     return true;
6013 
6014   while (bb_head != bb_end && DEBUG_INSN_P (bb_head))
6015     bb_head = NEXT_INSN (bb_head);
6016 
6017   if (bb_head == bb_end)
6018     return true;
6019 
6020   while (bb_head != bb_end && DEBUG_INSN_P (bb_end))
6021     bb_end = PREV_INSN (bb_end);
6022 
6023   if (bb_head == bb_end)
6024     return true;
6025 
6026   bb_next = NEXT_INSN (bb_head);
6027   while (bb_next != bb_end && DEBUG_INSN_P (bb_next))
6028     bb_next = NEXT_INSN (bb_next);
6029 
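  /* Modulo debug insns, the block consists of a single insn plus a
     trailing jump: removing the insn would leave only the jump.  */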
6030   if (bb_next == bb_end && JUMP_P (bb_end))
6031     return true;
6032 
6033   in_next = NEXT_INSN (insn);
6034   while (DEBUG_INSN_P (in_next))
6035     in_next = NEXT_INSN (in_next);
6036 
6037   if (IN_CURRENT_FENCE_P (in_next))
6038     return true;
6039 
6040   return false;
6041 }
6042 
6043 /* Remove INSN from stream.  When ONLY_DISCONNECT is true, its data
6044    is not removed but reused when INSN is re-emitted.  */
6045 static void
6046 remove_insn_from_stream (rtx insn, bool only_disconnect)
6047 {
6048   /* If there's only one insn in the BB, make sure that a nop is
6049      inserted into it, so the basic block won't disappear when we
6050      delete INSN below with sel_remove_insn.  It should also survive
6051      till the return to fill_insns.  */
6052   if (need_nop_to_preserve_insn_bb (insn))
6053     {
6054       insn_t nop = get_nop_from_pool (insn);
6055       gcc_assert (INSN_NOP_P (nop));
6056       VEC_safe_push (insn_t, heap, vec_temp_moveop_nops, nop);
6057     }
6058 
6059   sel_remove_insn (insn, only_disconnect, false);
6060 }
6061 
6062 /* This function is called when the original expr is found.
6063    INSN - current insn traversed, EXPR - the corresponding expr found.
6064    LPARAMS are the local parameters of the code motion driver; STATIC_PARAMS
6065    are the static parameters of move_op.  */
6066 static void
6067 move_op_orig_expr_found (insn_t insn, expr_t expr,
6068                          cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
6069                          void *static_params)
6070 {
6071   bool only_disconnect, insn_emitted;
6072   moveop_static_params_p params = (moveop_static_params_p) static_params;
6073 
6074   copy_expr_onside (params->c_expr, INSN_EXPR (insn));
6075   track_scheduled_insns_and_blocks (insn);
6076   insn_emitted = handle_emitting_transformations (insn, expr, params);
6077   only_disconnect = (params->uid == INSN_UID (insn)
6078                      && ! insn_emitted  && ! EXPR_WAS_CHANGED (expr));
6079 
6080   /* Mark that we've disconnected an insn.  */
6081   if (only_disconnect)
6082     params->uid = -1;
6083   remove_insn_from_stream (insn, only_disconnect);
6084 }
6085 
6086 /* The function is called when the original expr is found.
6087    INSN - current insn traversed, EXPR - the corresponding expr found,
6088    crosses_call and original_insns in STATIC_PARAMS are updated.  */
6089 static void
6090 fur_orig_expr_found (insn_t insn, expr_t expr ATTRIBUTE_UNUSED,
6091                      cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
6092                      void *static_params)
6093 {
6094   fur_static_params_p params = (fur_static_params_p) static_params;
6095   regset tmp;
6096 
6097   if (CALL_P (insn))
6098     params->crosses_call = true;
6099 
6100   def_list_add (params->original_insns, insn, params->crosses_call);
6101 
6102   /* Mark the registers that do not meet the following condition:
6103     (2) not among the live registers of the point
6104 	immediately following the first original operation on
6105 	a given downward path, except for the original target
6106 	register of the operation.  */
6107   tmp = get_clear_regset_from_pool ();
6108   compute_live_below_insn (insn, tmp);
6109   AND_COMPL_REG_SET (tmp, INSN_REG_SETS (insn));
6110   AND_COMPL_REG_SET (tmp, INSN_REG_CLOBBERS (insn));
6111   IOR_REG_SET (params->used_regs, tmp);
6112   return_regset_to_pool (tmp);
6113 
6114   /* (*1) We need to add to USED_REGS registers that are read by
6115      INSN's lhs.  This may lead to choosing a wrong src register.
6116      E.g. (scheduling const expr enabled):
6117 
6118 	429: ax=0x0	<- Can't use AX for this expr (0x0)
6119 	433: dx=[bp-0x18]
6120 	427: [ax+dx+0x1]=ax
6121 	  REG_DEAD: ax
6122 	168: di=dx
6123 	  REG_DEAD: dx
6124      */
6125   /* FIXME: see comment above and enable MEM_P
6126      in vinsn_separable_p.  */
6127   gcc_assert (!VINSN_SEPARABLE_P (INSN_VINSN (insn))
6128 	      || !MEM_P (INSN_LHS (insn)));
6129 }
6130 
6131 /* This function is called on the ascending pass, before returning from
6132    current basic block.  */
6133 static void
6134 move_op_at_first_insn (insn_t insn, cmpd_local_params_p lparams,
6135                        void *static_params)
6136 {
6137   moveop_static_params_p sparams = (moveop_static_params_p) static_params;
6138   basic_block book_block = NULL;
6139 
6140   /* When we have removed the boundary insn for scheduling, which also
6141      happened to be the end insn in its bb, we don't need to update sets.  */
6142   if (!lparams->removed_last_insn
6143       && lparams->e1
6144       && sel_bb_head_p (insn))
6145     {
6146       /* We should generate bookkeeping code only if we are not at the
6147          top level of the move_op.  */
6148       if (sel_num_cfg_preds_gt_1 (insn))
6149         book_block = generate_bookkeeping_insn (sparams->c_expr,
6150                                                 lparams->e1, lparams->e2);
6151       /* Update data sets for the current insn.  */
6152       update_data_sets (insn);
6153     }
6154 
6155   /* If bookkeeping code was inserted, we need to update the av set of the
6156      basic block that received it.  After generation of the bookkeeping insn,
6157      the bookkeeping block does not contain a valid av set, because we do not
6158      follow the original algorithm in every detail with regard to e.g. renaming
6159      simple reg-reg copies.  Consider this example:
6160 
6161      bookkeeping block           scheduling fence
6162      \            /
6163       \    join  /
6164        ----------
6165        |        |
6166        ----------
6167       /           \
6168      /             \
6169      r1 := r2          r1 := r3
6170 
6171      We try to schedule insn "r1 := r3" on the current
6172      scheduling fence.  Also, note that the av set of the bookkeeping block
6173      contains both insns "r1 := r2" and "r1 := r3".  When the insn has
6174      been scheduled, the CFG is as follows:
6175 
6176      r1 := r3               r1 := r3
6177      bookkeeping block           scheduling fence
6178      \            /
6179       \    join  /
6180        ----------
6181        |        |
6182        ----------
6183       /          \
6184      /            \
6185      r1 := r2
6186 
6187      Here, insn "r1 := r3" was scheduled at the current scheduling point
6188      and bookkeeping code was generated at the bookkeeping block.  This
6189      way insn "r1 := r2" is no longer available as a whole instruction
6190      (but only as an expr) ahead of insn "r1 := r3" in the bookkeeping block.
6191      This situation is handled by calling update_data_sets.
6192 
6193      Since update_data_sets is called only on the bookkeeping block, and
6194      its predecessors may also have av_sets containing instructions that
6195      are no longer available, we save all such expressions that become
6196      unavailable during data sets update on the bookkeeping block in
6197      VEC_BOOKKEEPING_BLOCKED_VINSNS.  Later we avoid selecting such
6198      expressions for scheduling.  This allows us to avoid recomputation of
6199      av_sets outside the code motion path.  */
6200 
6201   if (book_block)
6202     update_and_record_unavailable_insns (book_block);
6203 
6204   /* If INSN was previously removed, step back to the insn before it.  */
6205   if (lparams->removed_last_insn)
6206     insn = PREV_INSN (insn);
6207 
6208   /* Do not tidy control flow at the topmost moveop, as we can erroneously
6209      kill a block with a single nop in which the insn should be emitted.  */
6210   if (lparams->e1)
6211     tidy_control_flow (BLOCK_FOR_INSN (insn), true);
6212 }
6213 
6214 /* This function is called on the ascending pass, before returning from the
6215    current basic block.  */
6216 static void
6217 fur_at_first_insn (insn_t insn,
6218                    cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
6219                    void *static_params ATTRIBUTE_UNUSED)
6220 {
6221   gcc_assert (!sel_bb_head_p (insn) || AV_SET_VALID_P (insn)
6222 	      || AV_LEVEL (insn) == -1);
6223 }
6224 
6225 /* Called on the backward stage of recursion to call moveup_expr for insn
6226    and sparams->c_expr.  */
6227 static void
6228 move_op_ascend (insn_t insn, void *static_params)
6229 {
6230   enum MOVEUP_EXPR_CODE res;
6231   moveop_static_params_p sparams = (moveop_static_params_p) static_params;
6232 
6233   if (! INSN_NOP_P (insn))
6234     {
6235       res = moveup_expr_cached (sparams->c_expr, insn, false);
6236       gcc_assert (res != MOVEUP_EXPR_NULL);
6237     }
6238 
6239   /* Update liveness for this insn as it was invalidated.  */
6240   update_liveness_on_insn (insn);
6241 }
6242 
6243 /* This function is called on entering the basic block.
6244    Returns TRUE if this block has already been visited and
6245    code_motion_path_driver should return 1, FALSE otherwise.  */
6246 static int
6247 fur_on_enter (insn_t insn ATTRIBUTE_UNUSED, cmpd_local_params_p local_params,
6248 	      void *static_params, bool visited_p)
6249 {
6250   fur_static_params_p sparams = (fur_static_params_p) static_params;
6251 
6252   if (visited_p)
6253     {
6254       /* If we have found something below this block, there should be at
6255 	 least one insn in ORIGINAL_INSNS.  */
6256       gcc_assert (*sparams->original_insns);
6257 
6258       /* Adjust CROSSES_CALL, since we may have come to this block along
6259 	 a different path.  */
6260       DEF_LIST_DEF (*sparams->original_insns)->crosses_call
6261 	  |= sparams->crosses_call;
6262     }
6263   else
6264     local_params->old_original_insns = *sparams->original_insns;
6265 
6266   return 1;
6267 }
6268 
6269 /* Same as above but for move_op.  */
6270 static int
6271 move_op_on_enter (insn_t insn ATTRIBUTE_UNUSED,
6272                   cmpd_local_params_p local_params ATTRIBUTE_UNUSED,
6273                   void *static_params ATTRIBUTE_UNUSED, bool visited_p)
6274 {
6275   if (visited_p)
6276     return -1;
6277   return 1;
6278 }
6279 
6280 /* This function is called while descending the current basic block when
6281    the current insn is not the original EXPR we're searching for.
6282 
6283    Return value: FALSE, if code_motion_path_driver should perform a local
6284 			cleanup and return 0 itself;
6285 		 TRUE, if code_motion_path_driver should continue.  */
6286 static bool
6287 move_op_orig_expr_not_found (insn_t insn, av_set_t orig_ops ATTRIBUTE_UNUSED,
6288 			    void *static_params)
6289 {
6290   moveop_static_params_p sparams = (moveop_static_params_p) static_params;
6291 
6292 #ifdef ENABLE_CHECKING
6293   sparams->failed_insn = insn;
6294 #endif
6295 
6296   /* If we're scheduling separate expr, in order to generate correct code
6297      we need to stop the search at bookkeeping code generated with the
6298      same destination register or memory.  */
6299   if (lhs_of_insn_equals_to_dest_p (insn, sparams->dest))
6300     return false;
6301   return true;
6302 }
6303 
6304 /* This function is called while descending the current basic block when
6305    the current insn is not the original EXPR we're searching for.
6306 
6307    Return value: TRUE (code_motion_path_driver should continue).  */
6308 static bool
6309 fur_orig_expr_not_found (insn_t insn, av_set_t orig_ops, void *static_params)
6310 {
6311   bool mutexed;
6312   expr_t r;
6313   av_set_iterator avi;
6314   fur_static_params_p sparams = (fur_static_params_p) static_params;
6315 
6316   if (CALL_P (insn))
6317     sparams->crosses_call = true;
6318   else if (DEBUG_INSN_P (insn))
6319     return true;
6320 
6321   /* If the current insn we are looking at cannot be executed together
6322      with the original insn, then we can skip it safely.
6323 
6324      Example: ORIG_OPS = { (p6) r14 = sign_extend (r15); }
6325 	      INSN = (!p6) r14 = r14 + 1;
6326 
6327      Here we can schedule ORIG_OP with lhs = r14, though judging only
6328      by the sets of used and set registers of INSN we would have to
6329      forbid it.  So, add the registers set/used in INSN to the
6330      untouchable set only if there is an insn in ORIG_OPS that can
6331      affect INSN.  */
6332   mutexed = true;
6333   FOR_EACH_EXPR (r, avi, orig_ops)
6334     if (!sched_insns_conditions_mutex_p (insn, EXPR_INSN_RTX (r)))
6335       {
6336 	mutexed = false;
6337 	break;
6338       }
6339 
6340   /* Mark all registers that do not meet the following condition:
6341      (1) Not set or read on any path from xi to an instance of the
6342 	 original operation.  */
6343   if (!mutexed)
6344     {
6345       IOR_REG_SET (sparams->used_regs, INSN_REG_SETS (insn));
6346       IOR_REG_SET (sparams->used_regs, INSN_REG_USES (insn));
6347       IOR_REG_SET (sparams->used_regs, INSN_REG_CLOBBERS (insn));
6348     }
6349 
6350   return true;
6351 }
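
/* Illustrative sketch of the mutual-exclusion check above, with predicates
   modeled as (register, value) pairs.  All types and names here are
   hypothetical; a standalone sketch kept under "#if 0".  */
#if 0
#include <stdio.h>

/* A predicated insn: it executes only when its predicate register holds
   the required value.  */
struct pred_insn
{
  int preg;        /* Predicate register id.  */
  int pval;        /* Required value, 0 or 1.  */
  unsigned sets;   /* Bitmask of registers written.  */
};

/* Two insns guarded by the same predicate register with opposite values
   can never execute together.  */
static int
mutex_p (const struct pred_insn *a, const struct pred_insn *b)
{
  return a->preg == b->preg && a->pval != b->pval;
}

int
main (void)
{
  struct pred_insn orig = { 6, 1, 1u << 14 };   /* (p6) r14 = ...  */
  struct pred_insn insn = { 6, 0, 1u << 14 };   /* (!p6) r14 = ...  */
  unsigned used_regs = 0;

  /* Mirror of the FOR_EACH_EXPR loop above: mark INSN's registers as
     used only when some original insn is not mutex with it.  */
  if (!mutex_p (&orig, &insn))
    used_regs |= insn.sets;

  printf ("used_regs = 0x%x\n", used_regs);   /* 0x0: INSN skipped.  */
  return 0;
}
#endif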
6352 
6353 /* Hooks and data to perform move_op operations with code_motion_path_driver.  */
6354 struct code_motion_path_driver_info_def move_op_hooks = {
6355   move_op_on_enter,
6356   move_op_orig_expr_found,
6357   move_op_orig_expr_not_found,
6358   move_op_merge_succs,
6359   move_op_after_merge_succs,
6360   move_op_ascend,
6361   move_op_at_first_insn,
6362   SUCCS_NORMAL,
6363   "move_op"
6364 };
6365 
6366 /* Hooks and data to perform find_used_regs operations
6367    with code_motion_path_driver.  */
6368 struct code_motion_path_driver_info_def fur_hooks = {
6369   fur_on_enter,
6370   fur_orig_expr_found,
6371   fur_orig_expr_not_found,
6372   fur_merge_succs,
6373   NULL, /* fur_after_merge_succs */
6374   NULL, /* fur_ascend */
6375   fur_at_first_insn,
6376   SUCCS_ALL,
6377   "find_used_regs"
6378 };
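
/* Illustrative sketch of the hook-table pattern used by the two structures
   above: one driver body parameterized by a table of callbacks, so that
   move_op and find_used_regs share the CFG traversal while differing only
   in the per-insn work.  All names are hypothetical; a standalone sketch
   kept under "#if 0".  */
#if 0
#include <stdio.h>

struct walk_hooks
{
  void (*on_insn) (int uid);   /* Per-insn callback.  */
  const char *name;            /* For dumps, like routine_name.  */
};

static void move_cb (int uid) { printf ("  move_op sees insn %d\n", uid); }
static void scan_cb (int uid) { printf ("  find_used_regs sees insn %d\n", uid); }

static const struct walk_hooks move_hooks = { move_cb, "move_op" };
static const struct walk_hooks scan_hooks = { scan_cb, "find_used_regs" };

/* The single driver body; its behavior depends only on HOOKS, just as
   code_motion_path_driver depends on code_motion_path_driver_info.  */
static void
drive (const struct walk_hooks *hooks, const int *uids, int n)
{
  int i;
  printf ("%s:\n", hooks->name);
  for (i = 0; i < n; i++)
    hooks->on_insn (uids[i]);
}

int
main (void)
{
  int uids[] = { 10, 11, 12 };
  drive (&move_hooks, uids, 3);
  drive (&scan_hooks, uids, 3);
  return 0;
}
#endif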
6379 
6380 /* Traverse all successors of INSN.  For each successor that is SUCCS_NORMAL,
6381    code_motion_path_driver is called recursively.  The original operation
6382    was found on at least one path starting with one of INSN's
6383    successors (this fact is asserted).  ORIG_OPS are the expressions we're
6384    looking for, PATH is the path we've traversed, STATIC_PARAMS are the
6385    parameters of either move_op or find_used_regs depending on the caller.
6386 
6387    Return 0 if we haven't found the expression, 1 if we found it, -1 if we
6388    don't know for sure at this point.  */
6389 static int
6390 code_motion_process_successors (insn_t insn, av_set_t orig_ops,
6391                                 ilist_t path, void *static_params)
6392 {
6393   int res = 0;
6394   succ_iterator succ_i;
6395   rtx succ;
6396   basic_block bb;
6397   int old_index;
6398   unsigned old_succs;
6399 
6400   struct cmpd_local_params lparams;
6401   expr_def _x;
6402 
6403   lparams.c_expr_local = &_x;
6404   lparams.c_expr_merged = NULL;
6405 
6406   /* We need to process only NORMAL succs for move_op, and collect live
6407      registers from ALL branches (including those leading out of the
6408      region) for find_used_regs.
6409 
6410      In move_op, there can be a case when the insn's bb number has changed
6411      due to created bookkeeping.  This happens very rarely, as we would need
6412      to move an expression from the beginning to the end of the same block.
6413      Rescan successors in this case.  */
6414 
6415  rescan:
6416   bb = BLOCK_FOR_INSN (insn);
6417   old_index = bb->index;
6418   old_succs = EDGE_COUNT (bb->succs);
6419 
6420   FOR_EACH_SUCC_1 (succ, succ_i, insn, code_motion_path_driver_info->succ_flags)
6421     {
6422       int b;
6423 
6424       lparams.e1 = succ_i.e1;
6425       lparams.e2 = succ_i.e2;
6426 
6427       /* Go deep into recursion only for NORMAL edges (non-backedges within the
6428 	 current region).  */
6429       if (succ_i.current_flags == SUCCS_NORMAL)
6430 	b = code_motion_path_driver (succ, orig_ops, path, &lparams,
6431 				     static_params);
6432       else
6433 	b = 0;
6434 
6435       /* Merge the C_EXPRes found, or unify live register sets from
6436 	 different successors.  */
6437       code_motion_path_driver_info->merge_succs (insn, succ, b, &lparams,
6438 						 static_params);
6439       if (b == 1)
6440         res = b;
6441       else if (b == -1 && res != 1)
6442         res = b;
6443 
6444       /* We have simplified the control flow below this point.  In this case,
6445          the iterator becomes invalid.  We need to try again.  */
6446       if (BLOCK_FOR_INSN (insn)->index != old_index
6447           || EDGE_COUNT (bb->succs) != old_succs)
6448         {
6449           insn = sel_bb_end (BLOCK_FOR_INSN (insn));
6450           goto rescan;
6451         }
6452     }
6453 
6454 #ifdef ENABLE_CHECKING
6455   /* Here, RES==1 if the original expr was found for at least one of the
6456      successors.  After the loop, RES may happen to be zero only if at
6457      some point the expr searched for is present in an av_set, but is
6458      not found below.  In most cases, this situation is an error.
6459      The exception is when the original operation is blocked by
6460      bookkeeping generated for another fence or for another path in the
6461      current move_op.  */
6462   gcc_assert (res == 1
6463 	      || (res == 0
6464 		  && av_set_could_be_blocked_by_bookkeeping_p (orig_ops,
6465 							       static_params))
6466 	      || res == -1);
6467 #endif
6468 
6469   /* Merge data, clean up, etc.  */
6470   if (res != -1 && code_motion_path_driver_info->after_merge_succs)
6471     code_motion_path_driver_info->after_merge_succs (&lparams, static_params);
6472 
6473   return res;
6474 }
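
/* Illustrative sketch of the rescan idiom above: snapshot the properties
   the iterator depends on, and restart the walk from scratch when a
   transformation performed inside the loop changes them.  Hypothetical,
   standalone code kept under "#if 0".  */
#if 0
#include <stdio.h>

int
main (void)
{
  int items[8] = { 1, 2, 3, 4, 5 };
  int n = 5;   /* Plays the role of EDGE_COUNT (bb->succs).  */
  int grew = 0;
  int old_n, i;

 rescan:
  old_n = n;
  for (i = 0; i < n; i++)
    {
      /* Simulate a transformation (like bookkeeping creation) that
         invalidates the iteration bounds exactly once.  */
      if (items[i] == 3 && !grew)
        {
          items[n++] = 6;
          grew = 1;
        }

      /* The snapshot no longer matches: the iterator is stale, so
         start over, as the driver does with "goto rescan".  */
      if (n != old_n)
        goto rescan;
    }

  printf ("done, n = %d\n", n);
  return 0;
}
#endif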
6475 
6476 
6477 /* Perform a cleanup when the driver is about to terminate.  ORIG_OPS_P
6478    is the pointer to the av set with expressions we were looking for,
6479    PATH_P is the pointer to the traversed path.  */
6480 static inline void
6481 code_motion_path_driver_cleanup (av_set_t *orig_ops_p, ilist_t *path_p)
6482 {
6483   ilist_remove (path_p);
6484   av_set_clear (orig_ops_p);
6485 }
6486 
6487 /* The driver function that implements move_op or find_used_regs
6488    functionality, depending on whether code_motion_path_driver_info is set
6489    to &MOVE_OP_HOOKS or &FUR_HOOKS.  This function implements the common
6490    parts of the code (CFG traversal etc.) shared by both functions.  INSN
6491    is the insn we're starting the search from, ORIG_OPS are the expressions
6492    we're searching for, PATH is the traversed path, LOCAL_PARAMS_IN are the
6493    local parameters of the driver, and STATIC_PARAMS are the static
6494    parameters of the caller.
6495 
6496    Returns whether the original instructions were found (-1 when this is not
6497    yet known).  Note that top-level code_motion_path_driver always returns true.  */
6498 static int
6499 code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
6500 			 cmpd_local_params_p local_params_in,
6501 			 void *static_params)
6502 {
6503   expr_t expr = NULL;
6504   basic_block bb = BLOCK_FOR_INSN (insn);
6505   insn_t first_insn, bb_tail, before_first;
6506   bool removed_last_insn = false;
6507 
6508   if (sched_verbose >= 6)
6509     {
6510       sel_print ("%s (", code_motion_path_driver_info->routine_name);
6511       dump_insn (insn);
6512       sel_print (",");
6513       dump_av_set (orig_ops);
6514       sel_print (")\n");
6515     }
6516 
6517   gcc_assert (orig_ops);
6518 
6519   /* If no original operations exist below this insn, return immediately.  */
6520   if (is_ineligible_successor (insn, path))
6521     {
6522       if (sched_verbose >= 6)
6523         sel_print ("Insn %d is ineligible successor\n", INSN_UID (insn));
6524       return false;
6525     }
6526 
6527   /* The block can have an invalid av set, which happens when the block was
6528      created earlier during move_op.  Return immediately in that case.  */
6529   if (sel_bb_head_p (insn))
6530     {
6531       if (! AV_SET_VALID_P (insn))
6532         {
6533           if (sched_verbose >= 6)
6534             sel_print ("Returned from block %d as it had invalid av set\n",
6535                        bb->index);
6536           return false;
6537         }
6538 
6539       if (bitmap_bit_p (code_motion_visited_blocks, bb->index))
6540         {
6541           /* We have already found an original operation on this branch, do not
6542              go any further and just return TRUE here.  If we don't stop here,
6543              the function can exhibit exponential behaviour even on small code
6544              with many different paths (e.g. with data speculation and
6545              recovery blocks).  */
6546           if (sched_verbose >= 6)
6547             sel_print ("Block %d already visited in this traversal\n", bb->index);
6548           if (code_motion_path_driver_info->on_enter)
6549             return code_motion_path_driver_info->on_enter (insn,
6550                                                            local_params_in,
6551                                                            static_params,
6552                                                            true);
6553         }
6554     }
6555 
6556   if (code_motion_path_driver_info->on_enter)
6557     code_motion_path_driver_info->on_enter (insn, local_params_in,
6558                                             static_params, false);
6559   orig_ops = av_set_copy (orig_ops);
6560 
6561   /* Filter the orig_ops set.  */
6562   if (AV_SET_VALID_P (insn))
6563     av_set_code_motion_filter (&orig_ops, AV_SET (insn));
6564 
6565   /* If no more original ops, return immediately.  */
6566   if (!orig_ops)
6567     {
6568       if (sched_verbose >= 6)
6569         sel_print ("No intersection with av set of block %d\n", bb->index);
6570       return false;
6571     }
6572 
6573   /* For non-speculative insns we have to leave only one form of the
6574      original operation, because if we don't, we may end up with
6575      different C_EXPRes and, consequently, with bookkeepings for different
6576      expression forms along the same code motion path.  That may lead to
6577      generation of incorrect code.  So for each code motion we stick to
6578      a single form of the instruction, except for speculative insns
6579      which we need to keep in different forms with all speculation
6580      types.  */
6581   av_set_leave_one_nonspec (&orig_ops);
6582 
6583   /* It is not possible that all ORIG_OPS are filtered out.  */
6584   gcc_assert (orig_ops);
6585 
6586   /* It is enough to place only heads and tails of visited basic blocks into
6587      the PATH.  */
6588   ilist_add (&path, insn);
6589   first_insn = insn;
6590   bb_tail = sel_bb_end (bb);
6591 
6592   /* Descend the basic block in search of the original expr; this part
6593      corresponds to the part of the original move_op procedure executed
6594      before the recursive call.  */
6595   for (;;)
6596     {
6597       /* Look at the insn and decide if it could be an ancestor of the
6598 	 currently scheduled operation.  If so, then the insn "dest = op" could
6599 	 either be replaced with "dest = reg", because REG now holds the result
6600 	 of OP, or just removed, if we've scheduled the insn as a whole.
6601 
6602 	 If this insn doesn't contain the currently scheduled OP, then proceed
6603 	 with the search and look at the insn's successors.  Operations we're
6604 	 searching for could have changed when moving up through this insn via
6605 	 substitution.  In this case, perform unsubstitution on them first.
6606 
6607 	 When traversing the DAG below this insn is finished, insert
6608 	 bookkeeping code, if the insn is a join point, and remove
6609 	 leftovers.  */
6610 
6611       expr = av_set_lookup (orig_ops, INSN_VINSN (insn));
6612       if (expr)
6613 	{
6614 	  insn_t last_insn = PREV_INSN (insn);
6615 
6616 	  /* We have found the original operation.   */
6617           if (sched_verbose >= 6)
6618             sel_print ("Found original operation at insn %d\n", INSN_UID (insn));
6619 
6620 	  code_motion_path_driver_info->orig_expr_found
6621             (insn, expr, local_params_in, static_params);
6622 
6623 	  /* Step back, so on the way back we'll start traversing from the
6624 	     previous insn (or we'll see that it's bb_note and skip that
6625 	     loop).  */
6626           if (insn == first_insn)
6627             {
6628               first_insn = NEXT_INSN (last_insn);
6629               removed_last_insn = sel_bb_end_p (last_insn);
6630             }
6631 	  insn = last_insn;
6632 	  break;
6633 	}
6634       else
6635 	{
6636 	  /* We haven't found the original expr, continue descending the basic
6637 	     block.  */
6638 	  if (code_motion_path_driver_info->orig_expr_not_found
6639               (insn, orig_ops, static_params))
6640 	    {
6641 	      /* Av set ops could have been changed when moving through this
6642 	         insn.  To find them below it, we have to un-substitute them.  */
6643 	      undo_transformations (&orig_ops, insn);
6644 	    }
6645 	  else
6646 	    {
6647 	      /* Clean up and return, if the hook tells us to do so.  It may
6648 		 happen if we've encountered the previously created
6649 		 bookkeeping.  */
6650 	      code_motion_path_driver_cleanup (&orig_ops, &path);
6651 	      return -1;
6652 	    }
6653 
6654 	  gcc_assert (orig_ops);
6655         }
6656 
6657       /* Stop at insn if we got to the end of BB.  */
6658       if (insn == bb_tail)
6659 	break;
6660 
6661       insn = NEXT_INSN (insn);
6662     }
6663 
6664   /* Here INSN either points to the insn before the original insn (may be
6665      bb_note, if original insn was a bb_head) or to the bb_end.  */
6666   if (!expr)
6667     {
6668       int res;
6669       rtx last_insn = PREV_INSN (insn);
6670       bool added_to_path;
6671 
6672       gcc_assert (insn == sel_bb_end (bb));
6673 
6674       /* Add bb tail to PATH (but it doesn't make any sense if it's a bb_head -
6675 	 it's already in PATH then).  */
6676       if (insn != first_insn)
6677 	{
6678 	  ilist_add (&path, insn);
6679 	  added_to_path = true;
6680 	}
6681       else
6682         added_to_path = false;
6683 
6684       /* Process_successors should be able to find at least one
6685 	 successor for which code_motion_path_driver returns TRUE.  */
6686       res = code_motion_process_successors (insn, orig_ops,
6687                                             path, static_params);
6688 
6689       /* The jump at the end of the basic block could have been removed or
6690          replaced during code_motion_process_successors, so recompute INSN as
6691          the last insn in the bb.  */
6692       if (NEXT_INSN (last_insn) != insn)
6693         {
6694           insn = sel_bb_end (bb);
6695           first_insn = sel_bb_head (bb);
6696         }
6697 
6698       /* Remove bb tail from path.  */
6699       if (added_to_path)
6700 	ilist_remove (&path);
6701 
6702       if (res != 1)
6703 	{
6704 	  /* This is the case when one of the original exprs is no longer
6705 	     available due to bookkeeping created on this branch with the same
6706 	     register.  In the original algorithm, which doesn't call
6707 	     update_data_sets on a bookkeeping block, this would simply result
6708 	     in returning FALSE when we've encountered a previously generated
6709 	     bookkeeping insn in move_op_orig_expr_not_found.  */
6710 	  code_motion_path_driver_cleanup (&orig_ops, &path);
6711 	  return res;
6712 	}
6713     }
6714 
6715   /* Don't need it any more.  */
6716   av_set_clear (&orig_ops);
6717 
6718   /* Backward pass: now, when we have C_EXPR computed, we'll drag it to
6719      the beginning of the basic block.  */
6720   before_first = PREV_INSN (first_insn);
6721   while (insn != before_first)
6722     {
6723       if (code_motion_path_driver_info->ascend)
6724 	code_motion_path_driver_info->ascend (insn, static_params);
6725 
6726       insn = PREV_INSN (insn);
6727     }
6728 
6729   /* Now we're at the bb head.  */
6730   insn = first_insn;
6731   ilist_remove (&path);
6732   local_params_in->removed_last_insn = removed_last_insn;
6733   code_motion_path_driver_info->at_first_insn (insn, local_params_in, static_params);
6734 
6735   /* This should be the very last operation as at bb head we could change
6736      the numbering by creating bookkeeping blocks.  */
6737   if (removed_last_insn)
6738     insn = PREV_INSN (insn);
6739   bitmap_set_bit (code_motion_visited_blocks, BLOCK_FOR_INSN (insn)->index);
6740   return true;
6741 }
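
/* Illustrative sketch of the driver's overall shape: a depth-first walk
   that does some work on the way down, recurses into successors, and then
   does the "drag it back up" work as the recursion unwinds -- the part
   move_op uses to carry C_EXPR back to the block head.  The graph and all
   names are hypothetical; a standalone sketch kept under "#if 0".  */
#if 0
#include <stdio.h>

/* A small DAG: node i has up to two successors, -1 meaning none.  */
static const int succs[6][2] = {
  { 1, 2 }, { 3, -1 }, { 3, -1 }, { 4, -1 }, { 5, -1 }, { -1, -1 }
};
static int visited[6];

static int
walk (int node, int target)
{
  int i, found = 0;

  if (visited[node])
    return 0;   /* Like move_op_on_enter cutting off revisits.  */
  visited[node] = 1;

  printf ("descend into %d\n", node);   /* Downward pass.  */

  if (node == target)
    found = 1;
  else
    for (i = 0; i < 2; i++)
      if (succs[node][i] >= 0)
        found |= walk (succs[node][i], target);

  /* Upward pass: runs only after everything below is finished.  */
  if (found)
    printf ("ascend through %d\n", node);

  return found;
}

int
main (void)
{
  walk (0, 4);
  return 0;
}
#endif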
6742 
6743 /* Move up the operations from the ORIG_OPS set, traversing the DAG starting
6744    from INSN.  PATH represents the edges traversed so far.
6745    DEST is the register chosen for scheduling the current expr.  Insert
6746    bookkeeping code at the join points.  EXPR_VLIW is the chosen expression,
6747    C_EXPR is what it looks like at the given CFG point.
6748    Set *SHOULD_MOVE to indicate whether we have only disconnected
6749    one of the insns found.
6750 
6751    Returns whether original instructions were found, which is asserted
6752    to be true in the caller.  */
6753 static bool
6754 move_op (insn_t insn, av_set_t orig_ops, expr_t expr_vliw,
6755          rtx dest, expr_t c_expr, bool *should_move)
6756 {
6757   struct moveop_static_params sparams;
6758   struct cmpd_local_params lparams;
6759   int res;
6760 
6761   /* Init params for code_motion_path_driver.  */
6762   sparams.dest = dest;
6763   sparams.c_expr = c_expr;
6764   sparams.uid = INSN_UID (EXPR_INSN_RTX (expr_vliw));
6765 #ifdef ENABLE_CHECKING
6766   sparams.failed_insn = NULL;
6767 #endif
6768   sparams.was_renamed = false;
6769   lparams.e1 = NULL;
6770 
6771   /* We haven't visited any blocks yet.  */
6772   bitmap_clear (code_motion_visited_blocks);
6773 
6774   /* Set appropriate hooks and data.  */
6775   code_motion_path_driver_info = &move_op_hooks;
6776   res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);
6777 
6778   gcc_assert (res != -1);
6779 
6780   if (sparams.was_renamed)
6781     EXPR_WAS_RENAMED (expr_vliw) = true;
6782 
6783   *should_move = (sparams.uid == -1);
6784 
6785   return res;
6786 }
6787 
6788 
6789 /* Functions that work with regions.  */
6790 
6791 /* The current seqno value used by init_seqno and init_seqno_1.  */
6792 static int cur_seqno;
6793 
6794 /* A helper for init_seqno.  Traverse the region starting from BB and
6795    compute seqnos for visited insns, marking visited bbs in VISITED_BBS.
6796    Clear visited blocks from BLOCKS_TO_RESCHEDULE.  */
6797 static void
6798 init_seqno_1 (basic_block bb, sbitmap visited_bbs, bitmap blocks_to_reschedule)
6799 {
6800   int bbi = BLOCK_TO_BB (bb->index);
6801   insn_t insn, note = bb_note (bb);
6802   insn_t succ_insn;
6803   succ_iterator si;
6804 
6805   SET_BIT (visited_bbs, bbi);
6806   if (blocks_to_reschedule)
6807     bitmap_clear_bit (blocks_to_reschedule, bb->index);
6808 
6809   FOR_EACH_SUCC_1 (succ_insn, si, BB_END (bb),
6810 		   SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
6811     {
6812       basic_block succ = BLOCK_FOR_INSN (succ_insn);
6813       int succ_bbi = BLOCK_TO_BB (succ->index);
6814 
6815       gcc_assert (in_current_region_p (succ));
6816 
6817       if (!TEST_BIT (visited_bbs, succ_bbi))
6818 	{
6819 	  gcc_assert (succ_bbi > bbi);
6820 
6821 	  init_seqno_1 (succ, visited_bbs, blocks_to_reschedule);
6822 	}
6823       else if (blocks_to_reschedule)
6824         bitmap_set_bit (forced_ebb_heads, succ->index);
6825     }
6826 
6827   for (insn = BB_END (bb); insn != note; insn = PREV_INSN (insn))
6828     INSN_SEQNO (insn) = cur_seqno--;
6829 }
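
/* Illustrative sketch of the numbering scheme: successors are processed
   first, and each block's insns are then numbered from its end backwards
   with a decreasing counter.  A four-block straight-line "region" with two
   insns per block is used; all names and sizes are hypothetical, and the
   code is a standalone sketch kept under "#if 0".  */
#if 0
#include <stdio.h>

#define NBB 4
static int cur = 2 * NBB;   /* Plays the role of sched_max_luid - 1.  */
static int seqno[2 * NBB];
static int visited[NBB];

static void
number (int bb)
{
  visited[bb] = 1;

  /* Recurse into the successor first, like init_seqno_1 does, so the
     actual numbering happens on the way back up.  */
  if (bb + 1 < NBB && !visited[bb + 1])
    number (bb + 1);

  /* Number the block's insns last-to-first with a decreasing counter.  */
  seqno[2 * bb + 1] = cur--;
  seqno[2 * bb] = cur--;
}

int
main (void)
{
  int i;

  number (0);
  for (i = 0; i < 2 * NBB; i++)
    printf ("insn %d -> seqno %d\n", i, seqno[i]);

  /* In a straight line the result is simply 1..8 in source order; the
     recursion matters once blocks branch and join.  */
  return 0;
}
#endif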
6830 
6831 /* Initialize seqnos for the current region.  BLOCKS_TO_RESCHEDULE contains
6832    blocks on which we're rescheduling when pipelining, FROM is the block where
6833    traversing the region begins (when pipelining, it may be the head of the
6834    loop instead of the head of the region).
6835 
6836    Returns the maximal seqno found.  */
6837 static int
6838 init_seqno (bitmap blocks_to_reschedule, basic_block from)
6839 {
6840   sbitmap visited_bbs;
6841   bitmap_iterator bi;
6842   unsigned bbi;
6843 
6844   visited_bbs = sbitmap_alloc (current_nr_blocks);
6845 
6846   if (blocks_to_reschedule)
6847     {
6848       sbitmap_ones (visited_bbs);
6849       EXECUTE_IF_SET_IN_BITMAP (blocks_to_reschedule, 0, bbi, bi)
6850         {
6851 	  gcc_assert (BLOCK_TO_BB (bbi) < current_nr_blocks);
6852           RESET_BIT (visited_bbs, BLOCK_TO_BB (bbi));
6853 	}
6854     }
6855   else
6856     {
6857       sbitmap_zero (visited_bbs);
6858       from = EBB_FIRST_BB (0);
6859     }
6860 
6861   cur_seqno = sched_max_luid - 1;
6862   init_seqno_1 (from, visited_bbs, blocks_to_reschedule);
6863 
6864   /* cur_seqno may be positive if the number of instructions is less than
6865      sched_max_luid - 1 (when rescheduling or if some instructions have been
6866      removed by the call to purge_empty_blocks in sel_sched_region_1).  */
6867   gcc_assert (cur_seqno >= 0);
6868 
6869   sbitmap_free (visited_bbs);
6870   return sched_max_luid - 1;
6871 }
6872 
6873 /* Initialize scheduling parameters for current region.  */
6874 static void
6875 sel_setup_region_sched_flags (void)
6876 {
6877   enable_schedule_as_rhs_p = 1;
6878   bookkeeping_p = 1;
6879   pipelining_p = (bookkeeping_p
6880                   && (flag_sel_sched_pipelining != 0)
6881 		  && current_loop_nest != NULL
6882 		  && loop_has_exit_edges (current_loop_nest));
6883   max_insns_to_rename = PARAM_VALUE (PARAM_SELSCHED_INSNS_TO_RENAME);
6884   max_ws = MAX_WS;
6885 }
6886 
6887 /* Return true if all basic blocks of current region are empty.  */
6888 static bool
6889 current_region_empty_p (void)
6890 {
6891   int i;
6892   for (i = 0; i < current_nr_blocks; i++)
6893     if (! sel_bb_empty_p (BASIC_BLOCK (BB_TO_BLOCK (i))))
6894       return false;
6895 
6896   return true;
6897 }
6898 
6899 /* Prepare and verify loop nest for pipelining.  */
6900 static void
6901 setup_current_loop_nest (int rgn, bb_vec_t *bbs)
6902 {
6903   current_loop_nest = get_loop_nest_for_rgn (rgn);
6904 
6905   if (!current_loop_nest)
6906     return;
6907 
6908   /* If this loop has any saved loop preheaders from nested loops,
6909      add these basic blocks to the current region.  */
6910   sel_add_loop_preheaders (bbs);
6911 
6912   /* Check that we're starting with valid information.  */
6913   gcc_assert (loop_latch_edge (current_loop_nest));
6914   gcc_assert (LOOP_MARKED_FOR_PIPELINING_P (current_loop_nest));
6915 }
6916 
6917 /* Compute instruction priorities for current region.  */
6918 static void
6919 sel_compute_priorities (int rgn)
6920 {
6921   sched_rgn_compute_dependencies (rgn);
6922 
6923   /* Compute insn priorities in haifa style.  Then free haifa style
6924      dependencies that we've calculated for this.  */
6925   compute_priorities ();
6926 
6927   if (sched_verbose >= 5)
6928     debug_rgn_dependencies (0);
6929 
6930   free_rgn_deps ();
6931 }
6932 
6933 /* Init scheduling data for RGN.  Returns true when this region should not
6934    be scheduled.  */
6935 static bool
6936 sel_region_init (int rgn)
6937 {
6938   int i;
6939   bb_vec_t bbs;
6940 
6941   rgn_setup_region (rgn);
6942 
6943   /* Even if sched_is_disabled_for_current_region_p() is true, we still
6944      do region initialization here so the region can be bundled correctly,
6945      but we'll skip the scheduling in sel_sched_region ().  */
6946   if (current_region_empty_p ())
6947     return true;
6948 
6949   bbs = VEC_alloc (basic_block, heap, current_nr_blocks);
6950 
6951   for (i = 0; i < current_nr_blocks; i++)
6952     VEC_quick_push (basic_block, bbs, BASIC_BLOCK (BB_TO_BLOCK (i)));
6953 
6954   sel_init_bbs (bbs);
6955 
6956   if (flag_sel_sched_pipelining)
6957     setup_current_loop_nest (rgn, &bbs);
6958 
6959   sel_setup_region_sched_flags ();
6960 
6961   /* Initialize luids and dependence analysis which both sel-sched and haifa
6962      need.  */
6963   sched_init_luids (bbs);
6964   sched_deps_init (false);
6965 
6966   /* Initialize haifa data.  */
6967   rgn_setup_sched_infos ();
6968   sel_set_sched_flags ();
6969   haifa_init_h_i_d (bbs);
6970 
6971   sel_compute_priorities (rgn);
6972   init_deps_global ();
6973 
6974   /* Main initialization.  */
6975   sel_setup_sched_infos ();
6976   sel_init_global_and_expr (bbs);
6977 
6978   VEC_free (basic_block, heap, bbs);
6979 
6980   blocks_to_reschedule = BITMAP_ALLOC (NULL);
6981 
6982   /* Init correct liveness sets on each instruction of a single-block loop.
6983      This is the only situation when we can't update liveness when calling
6984      compute_live for the first insn of the loop.  */
6985   if (current_loop_nest)
6986     {
6987       int header = (sel_is_loop_preheader_p (BASIC_BLOCK (BB_TO_BLOCK (0)))
6988                     ? 1
6989                     : 0);
6990 
6991       if (current_nr_blocks == header + 1)
6992         update_liveness_on_insn
6993           (sel_bb_head (BASIC_BLOCK (BB_TO_BLOCK (header))));
6994     }
6995 
6996   /* Set hooks so that no newly generated insn will go out unnoticed.  */
6997   sel_register_cfg_hooks ();
6998 
6999   /* !!! We call targetm.sched.init () for the whole region, but we invoke
7000      targetm.sched.finish () for every ebb.  */
7001   if (targetm.sched.init)
7002     /* None of the arguments are actually used in any target.  */
7003     targetm.sched.init (sched_dump, sched_verbose, -1);
7004 
7005   first_emitted_uid = get_max_uid () + 1;
7006   preheader_removed = false;
7007 
7008   /* Reset register allocation ticks array.  */
7009   memset (reg_rename_tick, 0, sizeof reg_rename_tick);
7010   reg_rename_this_tick = 0;
7011 
7012   bitmap_initialize (forced_ebb_heads, 0);
7013   bitmap_clear (forced_ebb_heads);
7014 
7015   setup_nop_vinsn ();
7016   current_copies = BITMAP_ALLOC (NULL);
7017   current_originators = BITMAP_ALLOC (NULL);
7018   code_motion_visited_blocks = BITMAP_ALLOC (NULL);
7019 
7020   return false;
7021 }
7022 
7023 /* Simplify insns after scheduling.  */
7024 static void
7025 simplify_changed_insns (void)
7026 {
7027   int i;
7028 
7029   for (i = 0; i < current_nr_blocks; i++)
7030     {
7031       basic_block bb = BASIC_BLOCK (BB_TO_BLOCK (i));
7032       rtx insn;
7033 
7034       FOR_BB_INSNS (bb, insn)
7035 	if (INSN_P (insn))
7036 	  {
7037 	    expr_t expr = INSN_EXPR (insn);
7038 
7039 	    if (EXPR_WAS_SUBSTITUTED (expr))
7040 	      validate_simplify_insn (insn);
7041 	  }
7042     }
7043 }
7044 
7045 /* Find boundaries of the EBB starting from basic block BB, marking blocks of
7046    this EBB in SCHEDULED_BLOCKS and appropriately filling in HEAD, TAIL,
7047    PREV_HEAD, and NEXT_TAIL fields of CURRENT_SCHED_INFO structure.  */
7048 static void
7049 find_ebb_boundaries (basic_block bb, bitmap scheduled_blocks)
7050 {
7051   insn_t head, tail;
7052   basic_block bb1 = bb;
7053   if (sched_verbose >= 2)
7054     sel_print ("Finishing schedule in bbs: ");
7055 
7056   do
7057     {
7058       bitmap_set_bit (scheduled_blocks, BLOCK_TO_BB (bb1->index));
7059 
7060       if (sched_verbose >= 2)
7061 	sel_print ("%d; ", bb1->index);
7062     }
7063   while (!bb_ends_ebb_p (bb1) && (bb1 = bb_next_bb (bb1)));
7064 
7065   if (sched_verbose >= 2)
7066     sel_print ("\n");
7067 
7068   get_ebb_head_tail (bb, bb1, &head, &tail);
7069 
7070   current_sched_info->head = head;
7071   current_sched_info->tail = tail;
7072   current_sched_info->prev_head = PREV_INSN (head);
7073   current_sched_info->next_tail = NEXT_INSN (tail);
7074 }
7075 
7076 /* Regenerate INSN_SCHED_CYCLEs for insns of current EBB.  */
7077 static void
7078 reset_sched_cycles_in_current_ebb (void)
7079 {
7080   int last_clock = 0;
7081   int haifa_last_clock = -1;
7082   int haifa_clock = 0;
7083   int issued_insns = 0;
7084   insn_t insn;
7085 
7086   if (targetm.sched.init)
7087     {
7088       /* None of the arguments are actually used by any target.
7089 	 NB: We should have an md_reset () hook for cases like this.  */
7090       targetm.sched.init (sched_dump, sched_verbose, -1);
7091     }
7092 
7093   state_reset (curr_state);
7094   advance_state (curr_state);
7095 
7096   for (insn = current_sched_info->head;
7097        insn != current_sched_info->next_tail;
7098        insn = NEXT_INSN (insn))
7099     {
7100       int cost, haifa_cost;
7101       int sort_p;
7102       bool asm_p, real_insn, after_stall, all_issued;
7103       int clock;
7104 
7105       if (!INSN_P (insn))
7106 	continue;
7107 
7108       asm_p = false;
7109       real_insn = recog_memoized (insn) >= 0;
7110       clock = INSN_SCHED_CYCLE (insn);
7111 
7112       cost = clock - last_clock;
7113 
7114       /* Initialize HAIFA_COST.  */
7115       if (! real_insn)
7116 	{
7117 	  asm_p = INSN_ASM_P (insn);
7118 
7119 	  if (asm_p)
7120 	    /* This is an asm insn which *had* to be scheduled first
7121 	       on the cycle.  */
7122 	    haifa_cost = 1;
7123 	  else
7124 	    /* This is a use/clobber insn.  It should not change
7125 	       cost.  */
7126 	    haifa_cost = 0;
7127 	}
7128       else
7129         haifa_cost = estimate_insn_cost (insn, curr_state);
7130 
7131       /* Stall for whatever cycles we've stalled before.  */
7132       after_stall = 0;
7133       if (INSN_AFTER_STALL_P (insn) && cost > haifa_cost)
7134         {
7135           haifa_cost = cost;
7136           after_stall = 1;
7137         }
7138       all_issued = issued_insns == issue_rate;
7139       if (haifa_cost == 0 && all_issued)
7140 	haifa_cost = 1;
7141       if (haifa_cost > 0)
7142 	{
7143 	  int i = 0;
7144 
7145 	  while (haifa_cost--)
7146 	    {
7147 	      advance_state (curr_state);
7148 	      issued_insns = 0;
7149               i++;
7150 
7151 	      if (sched_verbose >= 2)
7152                 {
7153                   sel_print ("advance_state (state_transition)\n");
7154                   debug_state (curr_state);
7155                 }
7156 
7157               /* The DFA may report that e.g. an insn requires 2 cycles to
7158                  be issued, but on the next cycle say that the insn is ready
7159                  to go.  Check this here.  */
7160               if (!after_stall
7161                   && real_insn
7162                   && haifa_cost > 0
7163                   && estimate_insn_cost (insn, curr_state) == 0)
7164                 break;
7165 
7166               /* When the data dependency stall is longer than the DFA stall,
7167                  and when we have issued exactly issue_rate insns and stalled,
7168                  it could be that after this longer stall the insn will again
7169                  become unavailable to the DFA restrictions.  This looks
7170                  strange, but happens e.g. on x86-64.  So recheck the DFA on
7171                  the last iteration.  */
7172               if ((after_stall || all_issued)
7173                   && real_insn
7174                   && haifa_cost == 0)
7175                 haifa_cost = estimate_insn_cost (insn, curr_state);
7176             }
7177 
7178 	  haifa_clock += i;
7179           if (sched_verbose >= 2)
7180             sel_print ("haifa clock: %d\n", haifa_clock);
7181 	}
7182       else
7183 	gcc_assert (haifa_cost == 0);
7184 
7185       if (sched_verbose >= 2)
7186 	sel_print ("Haifa cost for insn %d: %d\n", INSN_UID (insn), haifa_cost);
7187 
7188       if (targetm.sched.dfa_new_cycle)
7189 	while (targetm.sched.dfa_new_cycle (sched_dump, sched_verbose, insn,
7190 					    haifa_last_clock, haifa_clock,
7191 					    &sort_p))
7192 	  {
7193 	    advance_state (curr_state);
7194 	    issued_insns = 0;
7195 	    haifa_clock++;
7196 	    if (sched_verbose >= 2)
7197               {
7198                 sel_print ("advance_state (dfa_new_cycle)\n");
7199                 debug_state (curr_state);
7200 		sel_print ("haifa clock: %d\n", haifa_clock + 1);
7201               }
7202           }
7203 
7204       if (real_insn)
7205 	{
7206 	  cost = state_transition (curr_state, insn);
7207 	  issued_insns++;
7208 
7209           if (sched_verbose >= 2)
7210 	    {
7211 	      sel_print ("scheduled insn %d, clock %d\n", INSN_UID (insn),
7212 			 haifa_clock + 1);
7213               debug_state (curr_state);
7214 	    }
7215 	  gcc_assert (cost < 0);
7216 	}
7217 
7218       if (targetm.sched.variable_issue)
7219 	targetm.sched.variable_issue (sched_dump, sched_verbose, insn, 0);
7220 
7221       INSN_SCHED_CYCLE (insn) = haifa_clock;
7222 
7223       last_clock = clock;
7224       haifa_last_clock = haifa_clock;
7225     }
7226 }
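
/* Illustrative sketch of the cycle-accounting loop above, with the DFA
   collapsed into a single "busy" counter: each insn waits until the model
   says it can issue, each wait advances the emulated haifa clock, and
   issuing an insn makes the model busy for that insn's latency.  This is
   a drastic simplification with hypothetical names; a standalone sketch
   kept under "#if 0".  */
#if 0
#include <stdio.h>

int
main (void)
{
  /* Latencies of three insns, in program order.  */
  int latency[3] = { 2, 1, 1 };
  int busy = 0;   /* Toy stand-in for curr_state.  */
  int haifa_clock = 0;
  int i;

  for (i = 0; i < 3; i++)
    {
      /* estimate_insn_cost analogue: cycles to stall before issue.  */
      int cost = busy;

      while (cost-- > 0)
        {
          busy--;         /* advance_state analogue.  */
          haifa_clock++;
        }

      busy = latency[i];  /* state_transition analogue.  */
      printf ("insn %d issued at haifa clock %d\n", i, haifa_clock);
    }
  return 0;
}
#endif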
7227 
7228 /* Put TImode markers on insns starting a new issue group.  */
7229 static void
7230 put_TImodes (void)
7231 {
7232   int last_clock = -1;
7233   insn_t insn;
7234 
7235   for (insn = current_sched_info->head; insn != current_sched_info->next_tail;
7236        insn = NEXT_INSN (insn))
7237     {
7238       int cost, clock;
7239 
7240       if (!INSN_P (insn))
7241 	continue;
7242 
7243       clock = INSN_SCHED_CYCLE (insn);
7244       cost = (last_clock == -1) ? 1 : clock - last_clock;
7245 
7246       gcc_assert (cost >= 0);
7247 
7248       if (issue_rate > 1
7249 	  && GET_CODE (PATTERN (insn)) != USE
7250 	  && GET_CODE (PATTERN (insn)) != CLOBBER)
7251 	{
7252 	  if (reload_completed && cost > 0)
7253 	    PUT_MODE (insn, TImode);
7254 
7255 	  last_clock = clock;
7256 	}
7257 
7258       if (sched_verbose >= 2)
7259 	sel_print ("Cost for insn %d is %d\n", INSN_UID (insn), cost);
7260     }
7261 }
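
/* Illustrative sketch of the group-marking logic above: given the cycle
   recorded for each insn, an insn whose cycle advanced past the previous
   one starts a new parallel group and would receive the TImode mark.
   Hypothetical data; a standalone sketch kept under "#if 0".  */
#if 0
#include <stdio.h>

int
main (void)
{
  /* INSN_SCHED_CYCLE values recorded for six insns.  */
  int cycle[6] = { 0, 0, 1, 1, 1, 3 };
  int last_clock = -1;
  int i;

  for (i = 0; i < 6; i++)
    {
      int cost = (last_clock == -1) ? 1 : cycle[i] - last_clock;

      if (cost > 0)
        printf ("insn %d starts a new group\n", i);   /* Gets TImode.  */

      last_clock = cycle[i];
    }
  return 0;
}
#endif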
7262 
7263 /* Perform MD_FINISH on the EBBs comprising the current region.  When
7264    RESET_SCHED_CYCLES_P is true, run a pass emulating the scheduler
7265    to produce correct sched cycles on insns.  */
7266 static void
7267 sel_region_target_finish (bool reset_sched_cycles_p)
7268 {
7269   int i;
7270   bitmap scheduled_blocks = BITMAP_ALLOC (NULL);
7271 
7272   for (i = 0; i < current_nr_blocks; i++)
7273     {
7274       if (bitmap_bit_p (scheduled_blocks, i))
7275 	continue;
7276 
7277       /* While pipelining outer loops, skip bundling for loop
7278 	 preheaders.  Those will be rescheduled in the outer loop.  */
7279       if (sel_is_loop_preheader_p (EBB_FIRST_BB (i)))
7280 	continue;
7281 
7282       find_ebb_boundaries (EBB_FIRST_BB (i), scheduled_blocks);
7283 
7284       if (no_real_insns_p (current_sched_info->head, current_sched_info->tail))
7285 	continue;
7286 
7287       if (reset_sched_cycles_p)
7288 	reset_sched_cycles_in_current_ebb ();
7289 
7290       if (targetm.sched.init)
7291 	targetm.sched.init (sched_dump, sched_verbose, -1);
7292 
7293       put_TImodes ();
7294 
7295       if (targetm.sched.finish)
7296 	{
7297 	  targetm.sched.finish (sched_dump, sched_verbose);
7298 
7299 	  /* Extend luids so that insns generated by the target will
7300 	     get zero luid.  */
7301 	  sched_extend_luids ();
7302 	}
7303     }
7304 
7305   BITMAP_FREE (scheduled_blocks);
7306 }
7307 
7308 /* Free the scheduling data for the current region.  When RESET_SCHED_CYCLES_P
7309    is true, make an additional pass emulating the scheduler to get correct
7310    insn cycles for md_finish calls.  */
7311 static void
7312 sel_region_finish (bool reset_sched_cycles_p)
7313 {
7314   simplify_changed_insns ();
7315   sched_finish_ready_list ();
7316   free_nop_pool ();
7317 
7318   /* Free the vectors.  */
7319   if (vec_av_set)
7320     VEC_free (expr_t, heap, vec_av_set);
7321   BITMAP_FREE (current_copies);
7322   BITMAP_FREE (current_originators);
7323   BITMAP_FREE (code_motion_visited_blocks);
7324   vinsn_vec_free (&vec_bookkeeping_blocked_vinsns);
7325   vinsn_vec_free (&vec_target_unavailable_vinsns);
7326 
7327   /* If LV_SET of the region head should be updated, do it now because
7328      there will be no other chance.  */
7329   {
7330     succ_iterator si;
7331     insn_t insn;
7332 
7333     FOR_EACH_SUCC_1 (insn, si, bb_note (EBB_FIRST_BB (0)),
7334                      SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
7335       {
7336 	basic_block bb = BLOCK_FOR_INSN (insn);
7337 
7338 	if (!BB_LV_SET_VALID_P (bb))
7339 	  compute_live (insn);
7340       }
7341   }
7342 
7343   /* Emulate the Haifa scheduler for bundling.  */
7344   if (reload_completed)
7345     sel_region_target_finish (reset_sched_cycles_p);
7346 
7347   sel_finish_global_and_expr ();
7348 
7349   bitmap_clear (forced_ebb_heads);
7350 
7351   free_nop_vinsn ();
7352 
7353   finish_deps_global ();
7354   sched_finish_luids ();
7355   VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
7356 
7357   sel_finish_bbs ();
7358   BITMAP_FREE (blocks_to_reschedule);
7359 
7360   sel_unregister_cfg_hooks ();
7361 
7362   max_issue_size = 0;
7363 }
7364 
7365 
7366 /* Functions that implement the scheduler driver.  */
7367 
7368 /* Schedule a parallel instruction group on each of FENCES.  MAX_SEQNO
7369    is the current maximum seqno.  SCHEDULED_INSNS_TAILPP points to the tail
7370    of the list of scheduled insns -- these will be postprocessed later.  */
7371 static void
7372 schedule_on_fences (flist_t fences, int max_seqno,
7373                     ilist_t **scheduled_insns_tailpp)
7374 {
7375   flist_t old_fences = fences;
7376 
7377   if (sched_verbose >= 1)
7378     {
7379       sel_print ("\nScheduling on fences: ");
7380       dump_flist (fences);
7381       sel_print ("\n");
7382     }
7383 
7384   scheduled_something_on_previous_fence = false;
7385   for (; fences; fences = FLIST_NEXT (fences))
7386     {
7387       fence_t fence = NULL;
7388       int seqno = 0;
7389       flist_t fences2;
7390       bool first_p = true;
7391 
7392       /* Choose the next fence group to schedule.
7393          The fact that an insn can be scheduled only once
7394          per cycle is guaranteed by two properties:
7395          1. seqnos of parallel groups decrease with each iteration;
7396          2. if is_ineligible_successor () sees a larger seqno, it
7397          checks whether the candidate insn is_in_current_fence_p ().  */
7398       for (fences2 = old_fences; fences2; fences2 = FLIST_NEXT (fences2))
7399         {
7400           fence_t f = FLIST_FENCE (fences2);
7401 
7402           if (!FENCE_PROCESSED_P (f))
7403             {
7404               int i = INSN_SEQNO (FENCE_INSN (f));
7405 
7406               if (first_p || i > seqno)
7407                 {
7408                   seqno = i;
7409                   fence = f;
7410                   first_p = false;
7411                 }
7412               else
7413                 /* ??? Seqnos of different groups should be different.  */
7414                 gcc_assert (1 || i != seqno);
7415             }
7416         }
7417 
7418       gcc_assert (fence);
7419 
7420       /* As FENCE is nonnull, SEQNO is initialized.  */
7421       seqno -= max_seqno + 1;
7422       fill_insns (fence, seqno, scheduled_insns_tailpp);
7423       FENCE_PROCESSED_P (fence) = true;
7424     }
7425 
7426   /* All av_sets are invalidated by GLOBAL_LEVEL increase, thus we
7427      don't need to keep bookkeeping-invalidated and target-unavailable
7428      vinsns any more.  */
7429   vinsn_vec_clear (&vec_bookkeeping_blocked_vinsns);
7430   vinsn_vec_clear (&vec_target_unavailable_vinsns);
7431 }
7432 
7433 /* Calculate MIN_SEQNO and MAX_SEQNO.  */
7434 static void
7435 find_min_max_seqno (flist_t fences, int *min_seqno, int *max_seqno)
7436 {
7437   *min_seqno = *max_seqno = INSN_SEQNO (FENCE_INSN (FLIST_FENCE (fences)));
7438 
7439   /* The first element is already processed.  */
7440   while ((fences = FLIST_NEXT (fences)))
7441     {
7442       int seqno = INSN_SEQNO (FENCE_INSN (FLIST_FENCE (fences)));
7443 
7444       if (*min_seqno > seqno)
7445         *min_seqno = seqno;
7446       else if (*max_seqno < seqno)
7447         *max_seqno = seqno;
7448     }
7449 }
7450 
7451 /* Calculate new fences from FENCES.  */
7452 static flist_t
7453 calculate_new_fences (flist_t fences, int orig_max_seqno)
7454 {
7455   flist_t old_fences = fences;
7456   struct flist_tail_def _new_fences, *new_fences = &_new_fences;
7457 
7458   flist_tail_init (new_fences);
7459   for (; fences; fences = FLIST_NEXT (fences))
7460     {
7461       fence_t fence = FLIST_FENCE (fences);
7462       insn_t insn;
7463 
7464       if (!FENCE_BNDS (fence))
7465         {
7466           /* This fence doesn't have any successors.  */
7467           if (!FENCE_SCHEDULED_P (fence))
7468             {
7469               /* Nothing was scheduled on this fence.  */
7470               int seqno;
7471 
7472               insn = FENCE_INSN (fence);
7473               seqno = INSN_SEQNO (insn);
7474               gcc_assert (seqno > 0 && seqno <= orig_max_seqno);
7475 
7476               if (sched_verbose >= 1)
7477                 sel_print ("Fence %d[%d] has not changed\n",
7478                            INSN_UID (insn),
7479                            BLOCK_NUM (insn));
7480               move_fence_to_fences (fences, new_fences);
7481             }
7482         }
7483       else
7484         extract_new_fences_from (fences, new_fences, orig_max_seqno);
7485     }
7486 
7487   flist_clear (&old_fences);
7488   return FLIST_TAIL_HEAD (new_fences);
7489 }
7490 
7491 /* Update seqnos of insns given by PSCHEDULED_INSNS.  MIN_SEQNO and MAX_SEQNO
7492    are the minimum and maximum seqnos of the group, HIGHEST_SEQNO_IN_USE is
7493    the highest seqno used in the region.  Return the updated highest seqno.  */
7494 static int
7495 update_seqnos_and_stage (int min_seqno, int max_seqno,
7496                          int highest_seqno_in_use,
7497                          ilist_t *pscheduled_insns)
7498 {
7499   int new_hs;
7500   ilist_iterator ii;
7501   insn_t insn;
7502 
7503   /* Actually, new_hs is the seqno of the instruction that was
7504      scheduled first (i.e. it is the first one in SCHEDULED_INSNS).  */
7505   if (*pscheduled_insns)
7506     {
7507       new_hs = (INSN_SEQNO (ILIST_INSN (*pscheduled_insns))
7508                 + highest_seqno_in_use + max_seqno - min_seqno + 2);
7509       gcc_assert (new_hs > highest_seqno_in_use);
7510     }
7511   else
7512     new_hs = highest_seqno_in_use;
7513 
7514   FOR_EACH_INSN (insn, ii, *pscheduled_insns)
7515     {
7516       gcc_assert (INSN_SEQNO (insn) < 0);
7517       INSN_SEQNO (insn) += highest_seqno_in_use + max_seqno - min_seqno + 2;
7518       gcc_assert (INSN_SEQNO (insn) <= new_hs);
7519 
7520       /* When not pipelining, purge unneeded insn info on the scheduled insns.
7521          For example, having reg_last array of INSN_DEPS_CONTEXT in memory may
7522          require > 1GB of memory e.g. on limit-fnargs.c.  */
7523       if (! pipelining_p)
7524         free_data_for_scheduled_insn (insn);
7525     }
7526 
7527   ilist_clear (pscheduled_insns);
7528   global_level++;
7529 
7530   return new_hs;
7531 }
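
/* Worked example of the seqno arithmetic above, with hypothetical numbers.
   Suppose the fences of this step had seqnos 3..7 and 10 was the highest
   seqno already in use.  While scheduling, schedule_on_fences tagged each
   scheduled insn with its fence seqno minus (max_seqno + 1), so the tags
   are negative; the shift below puts them back above HIGHEST_SEQNO_IN_USE.
   A standalone sketch kept under "#if 0".  */
#if 0
#include <stdio.h>

int
main (void)
{
  int min_seqno = 3, max_seqno = 7, highest_seqno_in_use = 10;

  /* An insn scheduled at a fence with seqno 5 was tagged
     5 - (max_seqno + 1) = -3.  */
  int tagged = 5 - (max_seqno + 1);

  /* The update applied by the FOR_EACH_INSN loop above.  */
  int final = tagged + highest_seqno_in_use + max_seqno - min_seqno + 2;

  printf ("%d -> %d\n", tagged, final);   /* -3 -> 13, above 10.  */
  return 0;
}
#endif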
7532 
7533 /* The main driver for scheduling a region.  This function is responsible
7534    for correct propagation of fences (i.e. scheduling points) and creating
7535    a group of parallel insns at each of them.  It also supports
7536    pipelining.  ORIG_MAX_SEQNO is the maximal seqno before this pass
7537    of scheduling.  */
7538 static void
7539 sel_sched_region_2 (int orig_max_seqno)
7540 {
7541   int highest_seqno_in_use = orig_max_seqno;
7542 
7543   stat_bookkeeping_copies = 0;
7544   stat_insns_needed_bookkeeping = 0;
7545   stat_renamed_scheduled = 0;
7546   stat_substitutions_total = 0;
7547   num_insns_scheduled = 0;
7548 
7549   while (fences)
7550     {
7551       int min_seqno, max_seqno;
7552       ilist_t scheduled_insns = NULL;
7553       ilist_t *scheduled_insns_tailp = &scheduled_insns;
7554 
7555       find_min_max_seqno (fences, &min_seqno, &max_seqno);
7556       schedule_on_fences (fences, max_seqno, &scheduled_insns_tailp);
7557       fences = calculate_new_fences (fences, orig_max_seqno);
7558       highest_seqno_in_use = update_seqnos_and_stage (min_seqno, max_seqno,
7559                                                       highest_seqno_in_use,
7560                                                       &scheduled_insns);
7561     }
7562 
7563   if (sched_verbose >= 1)
7564     sel_print ("Scheduled %d bookkeeping copies, %d insns needed "
7565                "bookkeeping, %d insns renamed, %d insns substituted\n",
7566                stat_bookkeeping_copies,
7567                stat_insns_needed_bookkeeping,
7568                stat_renamed_scheduled,
7569                stat_substitutions_total);
7570 }
7571 
7572 /* Schedule a region.  When pipelining, search for possibly never scheduled
7573    bookkeeping code and schedule it.  Afterwards, reschedule the pipelined
7574    code with pipelining disabled.  */
7575 static void
7576 sel_sched_region_1 (void)
7577 {
7578   int orig_max_seqno;
7579 
7580   /* Remove empty blocks that might be in the region from the beginning.  */
7581   purge_empty_blocks ();
7582 
7583   orig_max_seqno = init_seqno (NULL, NULL);
7584   gcc_assert (orig_max_seqno >= 1);
7585 
7586   /* When pipelining outer loops, create fences on the loop header,
7587      not preheader.  */
7588   fences = NULL;
7589   if (current_loop_nest)
7590     init_fences (BB_END (EBB_FIRST_BB (0)));
7591   else
7592     init_fences (bb_note (EBB_FIRST_BB (0)));
7593   global_level = 1;
7594 
7595   sel_sched_region_2 (orig_max_seqno);
7596 
7597   gcc_assert (fences == NULL);
7598 
7599   if (pipelining_p)
7600     {
7601       int i;
7602       basic_block bb;
7603       struct flist_tail_def _new_fences;
7604       flist_tail_t new_fences = &_new_fences;
7605       bool do_p = true;
7606 
7607       pipelining_p = false;
7608       max_ws = MIN (max_ws, issue_rate * 3 / 2);
7609       bookkeeping_p = false;
7610       enable_schedule_as_rhs_p = false;
7611 
7612       /* Schedule newly created code that has not been scheduled yet.  */
7613       do_p = true;
7614 
7615       while (do_p)
7616         {
7617           do_p = false;
7618 
7619           for (i = 0; i < current_nr_blocks; i++)
7620             {
7621               basic_block bb = EBB_FIRST_BB (i);
7622 
7623               if (bitmap_bit_p (blocks_to_reschedule, bb->index))
7624                 {
7625                   if (! bb_ends_ebb_p (bb))
7626                     bitmap_set_bit (blocks_to_reschedule, bb_next_bb (bb)->index);
7627                   if (sel_bb_empty_p (bb))
7628                     {
7629                       bitmap_clear_bit (blocks_to_reschedule, bb->index);
7630                       continue;
7631                     }
7632                   clear_outdated_rtx_info (bb);
7633                   if (sel_insn_is_speculation_check (BB_END (bb))
7634                       && JUMP_P (BB_END (bb)))
7635                     bitmap_set_bit (blocks_to_reschedule,
7636                                     BRANCH_EDGE (bb)->dest->index);
7637                 }
7638               else if (! sel_bb_empty_p (bb)
7639                        && INSN_SCHED_TIMES (sel_bb_head (bb)) <= 0)
7640                 bitmap_set_bit (blocks_to_reschedule, bb->index);
7641             }
7642 
7643           for (i = 0; i < current_nr_blocks; i++)
7644             {
7645               bb = EBB_FIRST_BB (i);
7646 
7647               /* While pipelining outer loops, skip bundling for loop
7648                  preheaders.  Those will be rescheduled in the outer
7649                  loop.  */
7650               if (sel_is_loop_preheader_p (bb))
7651                 {
7652                   clear_outdated_rtx_info (bb);
7653                   continue;
7654                 }
7655 
7656               if (bitmap_bit_p (blocks_to_reschedule, bb->index))
7657                 {
7658                   flist_tail_init (new_fences);
7659 
7660                   orig_max_seqno = init_seqno (blocks_to_reschedule, bb);
7661 
7662                   /* Mark BB as head of the new ebb.  */
7663                   bitmap_set_bit (forced_ebb_heads, bb->index);
7664 
7665                   gcc_assert (fences == NULL);
7666 
7667                   init_fences (bb_note (bb));
7668 
7669                   sel_sched_region_2 (orig_max_seqno);
7670 
7671                   do_p = true;
7672                   break;
7673                 }
7674             }
7675         }
7676     }
7677 }
7678 
7679 /* Schedule the RGN region.  */
7680 void
7681 sel_sched_region (int rgn)
7682 {
7683   bool schedule_p;
7684   bool reset_sched_cycles_p;
7685 
7686   if (sel_region_init (rgn))
7687     return;
7688 
7689   if (sched_verbose >= 1)
7690     sel_print ("Scheduling region %d\n", rgn);
7691 
7692   schedule_p = (!sched_is_disabled_for_current_region_p ()
7693                 && dbg_cnt (sel_sched_region_cnt));
7694   reset_sched_cycles_p = pipelining_p;
7695   if (schedule_p)
7696     sel_sched_region_1 ();
7697   else
7698     /* Force initialization of INSN_SCHED_CYCLEs for correct bundling.  */
7699     reset_sched_cycles_p = true;
7700 
7701   sel_region_finish (reset_sched_cycles_p);
7702 }
7703 
7704 /* Perform global init for the scheduler.  */
7705 static void
7706 sel_global_init (void)
7707 {
7708   calculate_dominance_info (CDI_DOMINATORS);
7709   alloc_sched_pools ();
7710 
7711   /* Setup the infos for sched_init.  */
7712   sel_setup_sched_infos ();
7713   setup_sched_dump ();
7714 
7715   sched_rgn_init (false);
7716   sched_init ();
7717 
7718   sched_init_bbs ();
7719   /* Reset AFTER_RECOVERY if it has been set by the 1st scheduler pass.  */
7720   after_recovery = 0;
7721   can_issue_more = issue_rate;
7722 
7723   sched_extend_target ();
7724   sched_deps_init (true);
7725   setup_nop_and_exit_insns ();
7726   sel_extend_global_bb_info ();
7727   init_lv_sets ();
7728   init_hard_regs_data ();
7729 }
7730 
7731 /* Free the global data of the scheduler.  */
7732 static void
7733 sel_global_finish (void)
7734 {
7735   free_bb_note_pool ();
7736   free_lv_sets ();
7737   sel_finish_global_bb_info ();
7738 
7739   free_regset_pool ();
7740   free_nop_and_exit_insns ();
7741 
7742   sched_rgn_finish ();
7743   sched_deps_finish ();
7744   sched_finish ();
7745 
7746   if (current_loops)
7747     sel_finish_pipelining ();
7748 
7749   free_sched_pools ();
7750   free_dominance_info (CDI_DOMINATORS);
7751 }
7752 
7753 /* Return true when we need to skip selective scheduling.  Used for debugging.  */
7754 bool
7755 maybe_skip_selective_scheduling (void)
7756 {
7757   return ! dbg_cnt (sel_sched_cnt);
7758 }
7759 
7760 /* The entry point.  */
7761 void
7762 run_selective_scheduling (void)
7763 {
7764   int rgn;
7765 
7766   if (n_basic_blocks == NUM_FIXED_BLOCKS)
7767     return;
7768 
7769   sel_global_init ();
7770 
7771   for (rgn = 0; rgn < nr_regions; rgn++)
7772     sel_sched_region (rgn);
7773 
7774   sel_global_finish ();
7775 }
7776 
7777 #endif
7778